| code (string, length 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_conditional_detr'''] = ['''ConditionalDetrFeatureExtractor''']
_import_structure['''image_processing_conditional_detr'''] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_conditional_detr'''] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 75 |
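The row above follows the `transformers` lazy-import `__init__.py` pattern: an `_import_structure` dict maps submodules to their public names, optional backends are probed, and at runtime the entry in `sys.modules` is swapped for a `_LazyModule`. A minimal sketch of that mechanism, assuming a hypothetical package layout (`mypackage/heavy_module.py` defining `HeavyClass`):

```python
import importlib
import sys
import types


class LazyModule(types.ModuleType):
    """Defers importing submodules until one of their attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so __getattr__ is only hit once per attribute
        return value


# In mypackage/__init__.py one would then write (hypothetical names):
# sys.modules[__name__] = LazyModule(__name__, {"heavy_module": ["HeavyClass"]})
```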
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_gpt_bigcode"""] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 202 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected (height, width) after resizing, mirroring the processor logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_iscrowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_iscrowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 611 |
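The test row above drives `ConditionalDetrImageProcessor` end to end; a condensed usage sketch distilled from the slow tests (the checkpoint name and annotation layout are taken from them, the fixture path is a placeholder and the annotation list is elided):

```python
from PIL import Image
from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# COCO detection format: the image id plus a list of annotation dicts
target = {"image_id": 39769, "annotations": []}  # annotation list elided here
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # the tests expect torch.Size([1, 3, 800, 1066])
```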
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 611 | 1 |
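Given the argparse definition above, the converter can also be called directly from Python; a sketch with placeholder paths (only the function signature comes from the script itself):

```python
# All paths below are placeholders; stats.npy holds the mean/scale rows
# that the script reads back as stats[0] and stats[1].
convert_hifigan_checkpoint(
    checkpoint_path="./hifigan_generator.ckpt",
    stats_path="./stats.npy",
    pytorch_dump_folder_path="./speecht5_hifigan",
    config_path=None,  # falls back to the default SpeechT5HifiGanConfig
    repo_id=None,      # e.g. "my-user/speecht5_hifigan" to push to the Hub
)
```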
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 161 |
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Saves a randomly initialized model plus tokenizer (useful for building tiny test models)."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 161 | 1 |
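Because the function is exposed through `fire.Fire`, extra keyword arguments become config overrides; a hedged example (the model name, output directory, script filename, and override values are all placeholders):

```python
# From Python: a tiny, randomly initialized T5 for fast tests.
save_randomly_initialized_version("t5-small", "/tmp/t5-random", d_model=64, num_layers=2)

# Shell equivalent via fire, assuming the file is named save_randomly_initialized_model.py:
#   python save_randomly_initialized_model.py t5-small /tmp/t5-random --d_model=64 --num_layers=2
```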
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """An interactive terminal menu that lets the user pick one of `choices` with arrow or number keys."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Prints a choice, with an arrow next to the currently selected one."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f" {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Moves the selection up or down and re-renders the two affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Starts the menu and returns the selected index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 705 |
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every ordered combination of words from word_bank that concatenates to target."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 338 | 0 |
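As a quick sanity check of the recurrence (`table[i]` holds every decomposition of `target[:i]`, and each matching word forwards those decompositions to `table[i + len(word)]`), here is a small worked case:

```python
# "purple" can be built two ways from this word bank:
ways = all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
assert ways == [["purp", "le"], ["p", "ur", "p", "le"]]
# "purpl" reaches table[5] but dead-ends there, since no word completes the final "e".
```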
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 682 |
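The slow test above mirrors the usual `diffusers` video-to-video flow; a minimal inference sketch, assuming a CUDA device (the checkpoint id comes from the test, and the random input video is just to exercise the call):

```python
import torch
from diffusers import VideoToVideoSDPipeline

pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()  # keeps peak GPU memory low, as in the test

video = torch.randn(1, 10, 3, 1024, 576).to("cuda")  # batch of 10 random frames
frames = pipe("Spiderman is surfing", video=video, num_inference_steps=3, output_type="pt").frames
print(frames.shape)
```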
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [rgram for rgrams in rgramslist for rgram in rgrams]
__SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = Counter()
for sgram, scount in sgramcounter.items():
__SCREAMING_SNAKE_CASE = scount * numref
__SCREAMING_SNAKE_CASE = Counter(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = Counter()
for cgram, ccount in cgramcounter.items():
__SCREAMING_SNAKE_CASE = ccount * numref
# KEEP
__SCREAMING_SNAKE_CASE = sgramcounter_rep & cgramcounter_rep
__SCREAMING_SNAKE_CASE = keepgramcounter_rep & rgramcounter
__SCREAMING_SNAKE_CASE = sgramcounter_rep & rgramcounter
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
if len(lowerCAmelCase_ ) > 0:
__SCREAMING_SNAKE_CASE = keeptmpscorea / len(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__SCREAMING_SNAKE_CASE = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__SCREAMING_SNAKE_CASE = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__SCREAMING_SNAKE_CASE = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__SCREAMING_SNAKE_CASE = sgramcounter_rep - cgramcounter_rep
__SCREAMING_SNAKE_CASE = delgramcounter_rep - rgramcounter
__SCREAMING_SNAKE_CASE = sgramcounter_rep - rgramcounter
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE = 1
if len(lowerCAmelCase_ ) > 0:
__SCREAMING_SNAKE_CASE = deltmpscorea / len(lowerCAmelCase_ )
# ADDITION
__SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) & set(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
if len(lowerCAmelCase_ ) > 0:
__SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
__SCREAMING_SNAKE_CASE = addtmpscore / len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = 0
if addscore_precision > 0 or addscore_recall > 0:
__SCREAMING_SNAKE_CASE = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = ssent.split(" " )
__SCREAMING_SNAKE_CASE = csent.split(" " )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for rsent in rsents:
__SCREAMING_SNAKE_CASE = rsent.split(" " )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
ragramslist.append(lowerCAmelCase_ )
for i in range(0 , len(lowerCAmelCase_ ) - 1 ):
if i < len(lowerCAmelCase_ ) - 1:
__SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 2:
__SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 3:
__SCREAMING_SNAKE_CASE = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(lowerCAmelCase_ )
ragramslist.append(lowerCAmelCase_ )
ragramslist.append(lowerCAmelCase_ )
ragramslist.append(lowerCAmelCase_ )
for i in range(0 , len(lowerCAmelCase_ ) - 1 ):
if i < len(lowerCAmelCase_ ) - 1:
__SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 2:
__SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 3:
__SCREAMING_SNAKE_CASE = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(lowerCAmelCase_ )
for i in range(0 , len(lowerCAmelCase_ ) - 1 ):
if i < len(lowerCAmelCase_ ) - 1:
__SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 2:
__SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(lowerCAmelCase_ )
if i < len(lowerCAmelCase_ ) - 3:
__SCREAMING_SNAKE_CASE = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(lowerCAmelCase_ )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = SARIngram(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__SCREAMING_SNAKE_CASE = sum([delascore, delascore, delascore, delascore] ) / 4
__SCREAMING_SNAKE_CASE = sum([addascore, addascore, addascore, addascore] ) / 4
__SCREAMING_SNAKE_CASE = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="exp" , lowerCAmelCase_=None , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(references[0] )
if any(len(lowerCAmelCase_ ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
__SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase_ )]
__SCREAMING_SNAKE_CASE = sacrebleu.corpus_bleu(
lowerCAmelCase_ , lowerCAmelCase_ , smooth_method=lowerCAmelCase_ , smooth_value=lowerCAmelCase_ , force=lowerCAmelCase_ , lowercase=lowerCAmelCase_ , use_effective_order=lowerCAmelCase_ , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
] , reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 682 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_real_features=0, num_static_categorical_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 713 |
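Two derived fields in the config above are worth calling out: `context_length` falls back to `prediction_length`, and `feature_size` combines the lagged inputs with `_number_of_features`. A small check:

```python
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=2)
print(config.context_length)  # 24, defaulted from prediction_length
# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 7 + (0 + 0 + 2 + 0 + 1 * 2) = 11
print(config.feature_size)
```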
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError('StoppingCriteria needs to be subclassed')


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                'exceptions, performance degradation, or nothing at all.' )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            'with `max_length = start_length + max_new_tokens` instead.', FutureWarning, )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 406 | 0 |
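A usage sketch for composing the criteria defined above during generation (`model` and `input_ids` are placeholders):

```python
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

# Stop as soon as either 50 tokens are reached or 5 seconds have elapsed.
stopping_criteria = StoppingCriteriaList(
    [MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=5.0)]
)
# outputs = model.generate(input_ids, stopping_criteria=stopping_criteria)
```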
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 544 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    # map the (alpha, sigma) noise parametrization onto a continuous time in [0, 1]
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
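# Worked example: for t = 0.5 the crash schedule gives sigma = sin(pi/4)**2 = 0.5
# and alpha = (1 - 0.25)**0.5 ≈ 0.8660, so alpha_sigma_to_t(alpha, sigma)
# = atan2(0.5, 0.8660) / pi * 2 = (pi/6) / pi * 2 = 1/3, i.e.
# get_crash_schedule(torch.tensor(0.5)) ≈ 0.3333.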
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]['url']
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
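# Note: `download` shells out to `wget`, so it requires `wget` on PATH and
# assumes the checkpoint lands in the current working directory, e.g.
# download('gwf-440k') -> './gwf-440k.ckpt'.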
DOWN_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith('skip'):
        return name.replace('skip', RES_CONV_MAP['skip'])

    # name has to be of format main.{digit}
    if not name.startswith('main.'):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
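# Worked examples, derived from the maps above:
#   convert_resconv_naming('main.0.weight') -> 'conv_1.weight'
#   convert_attn_naming('norm.weight')      -> 'group_norm.weight'
#   convert_attn_naming('qkv_proj.weight')  -> ['query.weight', 'key.weight', 'value.weight']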
def rename(input_string, max_depth=13):
    string = input_string

    if string.split('.')[0] == 'timestep_embed':
        return string.replace('timestep_embed', 'time_proj')

    depth = 0
    if string.startswith('net.3.'):
        depth += 1
        string = string[6:]
    elif string.startswith('net.'):
        string = string[4:]

    while string.startswith('main.7.'):
        depth += 1
        string = string[7:]

    if string.startswith('main.'):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else 'down_blocks.0'

    if not string_left.startswith('.'):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if 'resnets' in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif 'attentions' in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
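# Example, via the early-exit branch above: rename('timestep_embed.weight')
# returns 'time_proj.weight'; deeper checkpoint keys such as 'net.3.main.7. ...'
# first walk the `net.3.` / `main.7.` prefixes to compute `depth` before the
# layer maps are applied.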
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel'):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model_name = args.model_path.split('/')[-1].split('.')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]['sample_rate']
    sample_size = MODELS_MAP[model_name]['sample_size']

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['state_dict'])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith('kernel') for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == 'time_proj.weight':
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print('Diff sum', diff_sum)
    print('Diff max', diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
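# Example invocation (illustrative; assumes this script is saved as
# convert_dance_diffusion_to_diffusers.py, and the output directory is arbitrary):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers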
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    main(args)
| 544 | 1 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 2_5_6,
'vinai/phobert-large': 2_5_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
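# Example: get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}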
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
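    # Example: PhoBERT follows the RoBERTa special-token layout, so a single
    # sequence is encoded as `<s> X </s>` and a pair as `<s> A </s></s> B </s>`;
    # the helpers above emit the matching special-token mask and all-zero
    # token type ids for those layouts.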
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
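    # Sketch of the merge loop above (hypothetical ranks): with
    # self.bpe_ranks == {('l', 'o'): 0, ('lo', 'w</w>'): 1}, bpe('low') first
    # merges ('l', 'o'), then ('lo', 'w</w>'), and returns 'low' with the
    # trailing '</w>' stripped; unmergeable tokens come back joined with
    # '@@ ' as subword pieces.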
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(' ')
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 133 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BlipImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.')

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
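# Usage sketch (illustrative; the checkpoint id is an assumption, any BLIP-2
# checkpoint that ships this processor should work):
#
#   from PIL import Image
#   processor = Blip2Processor.from_pretrained('Salesforce/blip2-opt-2.7b')
#   image = Image.open('photo.jpg')
#   inputs = processor(images=image, text='a photo of', return_tensors='pt')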
| 133 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor  # the digit was garbled in extraction; LayoutLMv2ImageProcessor has the same interface
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')

        image = Image.open(ds[0]['file']).convert('RGB')

        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
UpperCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE__ )
self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE__ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 282 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = 'openai/whisper-base'
    description = (
        'This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '
        'transcribed text.'
    )
    name = 'transcriber'
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ['audio']
    outputs = ['text']

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
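# Usage sketch (illustrative): `PipelineTool` loads the checkpoint lazily on the
# first call, and `audio` is expected to be a raw waveform (e.g. a 16 kHz mono
# float array), matching what `WhisperProcessor` accepts:
#
#   tool = SpeechToTextTool()
#   transcript = tool(audio)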
| 282 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__(self):
        output = f'Conversation id: {self.uuid} \n'
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += f'{name} >> {text} \n'
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params['min_length_for_response'] = min_length_for_response
        if minimum_tokens is not None:
            forward_params['minimum_tokens'] = minimum_tokens

        if 'max_length' in generate_kwargs:
            forward_params['max_length'] = generate_kwargs['max_length']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == 'pt':
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == 'tf':
            input_ids = tf.constant([input_ids])
        return {'input_ids': input_ids, 'conversation': conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)

        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if 'attention_mask' in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {'output_ids': output_ids[:, start_position:], 'conversation': conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
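# Usage sketch (illustrative; the checkpoint id is an assumption):
#
#   from transformers import pipeline
#
#   chatbot = pipeline('conversational', model='microsoft/DialoGPT-medium')
#   conversation = Conversation('Going to the movies tonight - any suggestions?')
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])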
| 3 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__(self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings):
        super().__init__()

        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings)
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [''] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(uncond_tokens, padding='max_length', max_length=max_length, truncation=True, return_tensors='pt')
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(self, prompt, num_inference_steps=100, guidance_scale=5.0, truncation_rate=1.0, num_images_per_prompt=1, generator=None, latents=None, output_type='pil', return_dict=True, callback=None, callback_steps=1) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.')

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'
                    f' {self.transformer.num_vector_embeds - 1} (inclusive).')
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
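    # Worked example for `truncate` (illustrative): with per-class probabilities
    # (0.6, 0.3, 0.1) and truncation_rate=0.8, the sorted cumulative sums are
    # (0.6, 0.9, 1.0); shifting the `< 0.8` mask right while forcing the largest
    # class to survive keeps the 0.6 and 0.3 classes and sets the 0.1 class to
    # log(0) = -inf before sampling.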
| 3 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict['global_attention_mask'] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        'input_ids': input_ids,
        'attention_mask': attention_mask,
        'decoder_input_ids': decoder_input_ids,
        'decoder_attention_mask': decoder_attention_mask,
        'head_mask': head_mask,
        'decoder_head_mask': decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
        # TODO: head masking is not yet implemented
pass
def _long_tensor(__lowercase ) -> Optional[Any]:
    '''simple docstring'''
    return tf.constant(__lowercase , dtype=tf.int32 )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
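# The two re-exports above keep the old import path alive; the `deprecate()`
# call below then warns once at import time. A minimal stdlib-only sketch of
# the same shim pattern (illustrative, not the diffusers helper itself):
#
#   import warnings
#   warnings.warn(
#       "Import `StableDiffusionControlNetPipeline` from `diffusers` directly; "
#       "this module is a deprecated alias.",
#       FutureWarning,
#       stacklevel=3,
#   )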
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
'''simple docstring'''
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
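# Illustrative sanity check for the code-point ranges above: "中" (U+4E2D) lies
# inside the 0x4E00-0x9FFF CJK Unified Ideographs block, while "A" (U+0041)
# lies in none of the blocks.
#
#   assert _is_chinese_char(ord("中"))
#   assert not _is_chinese_char(ord("A"))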
def is_chinese(word):
    '''simple docstring'''
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    '''simple docstring'''
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
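# Worked example (hypothetical data): with bert_tokens = ["我", "喜", "欢", "你"]
# and chinese_word_set = {"喜欢"}, the longest-match loop above rewrites the
# tokens to ["我", "喜", "##欢", "你"], marking "欢" as the continuation of the
# whole word "喜欢" so both subwords can be masked together.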
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    '''simple docstring'''
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the position of Chinese tokens
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    '''simple docstring'''
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
_lowercase : Optional[int] = parser.parse_args()
main(args)
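    # Example invocation (all paths are placeholders, not shipped resources):
    #   python prepare_chinese_ref.py --file_name ./data/train.txt \
    #       --ltp ./resources/ltp --bert bert-base-chinese --save_path ./ref.txt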
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
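# `_LazyModule` defers the heavy torch/vision imports until an attribute is
# first accessed. A minimal PEP 562 sketch of the same idea (illustrative
# names, not the real implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for module, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{module}", __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")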
| 50 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : str = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: str = (3, 32, 128)
SCREAMING_SNAKE_CASE_: List[str] = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[int] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
SCREAMING_SNAKE_CASE_: Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: Optional[Any] = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
SCREAMING_SNAKE_CASE_: List[str] = os.path.join(self.tmpdirname , lowerCAmelCase__)
with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowerCAmelCase__ : Any):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowerCAmelCase__ : Any):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: str = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)
SCREAMING_SNAKE_CASE_: int = Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1))
return image_input
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Tuple = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_: List[Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: List[str] = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_: List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
SCREAMING_SNAKE_CASE_: List[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0)
SCREAMING_SNAKE_CASE_: Optional[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: int = self.get_image_processor()
SCREAMING_SNAKE_CASE_: int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Any = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(lowerCAmelCase__ , return_tensors="np")
SCREAMING_SNAKE_CASE_: str = processor(images=lowerCAmelCase__ , return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = "test"
SCREAMING_SNAKE_CASE_: int = processor(text=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer(lowerCAmelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: str = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Any = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = "test"
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: Dict = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()) , ["pixel_values", "labels"])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_: Dict = processor.char_decode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.batch_decode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = [seq.replace(" " , "") for seq in decoded_tok]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Optional[Any] = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = None
SCREAMING_SNAKE_CASE_: Optional[Any] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: Optional[Any] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] = MgpstrProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.randn(1 , 27 , 38)
SCREAMING_SNAKE_CASE_: Tuple = torch.randn(1 , 27 , 5_0257)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.randn(1 , 27 , 3_0522)
SCREAMING_SNAKE_CASE_: int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
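        # The three random tensors above mirror MGP-STR's three recognition
        # heads: 38 character classes, a 50257-entry BPE vocabulary (GPT-2
        # sized) and a 30522-entry WordPiece vocabulary (BERT sized);
        # `batch_decode` fuses the per-head predictions into a single result.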
| 671 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = '''upernet'''
def __init__( self : Any , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : str=[1, 2, 3, 6] , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Dict=0.4 , lowerCAmelCase__ : int=384 , lowerCAmelCase__ : Union[str, Any]=256 , lowerCAmelCase__ : Any=1 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : List[str]=255 , **lowerCAmelCase__ : List[str] , ):
super().__init__(**lowerCAmelCase__)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
SCREAMING_SNAKE_CASE_: Dict = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = backbone_config.get("model_type")
SCREAMING_SNAKE_CASE_: str = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_: Tuple = config_class.from_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = backbone_config
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Any = pool_scales
SCREAMING_SNAKE_CASE_: Optional[Any] = use_auxiliary_head
SCREAMING_SNAKE_CASE_: str = auxiliary_loss_weight
SCREAMING_SNAKE_CASE_: List[Any] = auxiliary_in_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = auxiliary_channels
SCREAMING_SNAKE_CASE_: Dict = auxiliary_num_convs
SCREAMING_SNAKE_CASE_: str = auxiliary_concat_input
SCREAMING_SNAKE_CASE_: Dict = loss_ignore_index
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = copy.deepcopy(self.__dict__)
SCREAMING_SNAKE_CASE_: int = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_: Optional[int] = self.__class__.model_type
return output
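# Minimal usage sketch (illustrative; relies only on the defaults above):
#
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#
#   config = UperNetConfig()                 # falls back to a ResNet backbone
#   model = UperNetForSemanticSegmentation(config)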
| 671 | 1 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform a linear search on a slice of the list; return -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; return -1 if the target is not found."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; return -1 if the target is not found."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print("Not found")
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = '''vivit'''
def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : str = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : str = layer_norm_eps
SCREAMING_SNAKE_CASE : str = image_size
SCREAMING_SNAKE_CASE : Dict = num_frames
SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size
SCREAMING_SNAKE_CASE : Dict = num_channels
SCREAMING_SNAKE_CASE : int = qkv_bias
super().__init__(**lowerCamelCase_ )
| 79 | 0 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
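# Worked example of Graham's law (illustrative numbers): for H2 (2.016 g/mol)
# and O2 (31.998 g/mol),
#
#   effusion_ratio(2.016, 31.998) == round(sqrt(31.998 / 2.016), 6) ≈ 3.984
#
# i.e. hydrogen effuses roughly four times faster than oxygen.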
| 401 | import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = IFPipeline
lowerCamelCase :Optional[Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
lowerCamelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase :Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def UpperCAmelCase ( self ) -> List[Any]:
return self._get_dummy_components()
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> str:
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase ( self ) -> int:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase ( self ) -> List[str]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase ( self ) -> Tuple:
self._test_save_load_local()
def UpperCAmelCase ( self ) -> Any:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> Union[str, Any]:
# if
        _A = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
        _A = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_A , _A = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_A = None
_A = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
        _A = IFImg2ImgPipeline(**pipe_a.components )
        _A = IFImg2ImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_img2img(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_A = IFInpaintingPipeline(**pipe_a.components )
_A = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    def _test_if(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , num_inference_steps=2 , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
    def _test_if_img2img(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , num_inference_steps=2 , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , original_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
    def _test_if_inpainting(self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase_ )
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , num_inference_steps=2 , generator=lowerCAmelCase_ , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (64, 64, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
# pipeline 2
_start_torch_memory_measurement()
_A = torch.Generator(device="""cpu""" ).manual_seed(0 )
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCAmelCase_ )
_A = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowerCAmelCase_ )
_A = pipe_a(
prompt_embeds=lowerCAmelCase_ , negative_prompt_embeds=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , original_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type="""np""" , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_A = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ )
def _start_torch_memory_measurement() -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 401 | 1 |
def solution(n: int = 4_000_000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
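# Project Euler #2: the loop walks the Fibonacci sequence once, so the cost is
# linear in the number of terms; for the default bound of 4,000,000 the
# expected result is 4613732.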
if __name__ == "__main__":
print(f'{solution() = }')
| 142 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Tuple = '''align_text_model'''
def __init__(self , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase="absolute" , UpperCAmelCase=True , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =hidden_act
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =type_vocab_size
__UpperCAmelCase =initializer_range
__UpperCAmelCase =layer_norm_eps
__UpperCAmelCase =position_embedding_type
__UpperCAmelCase =use_cache
__UpperCAmelCase =pad_token_id
@classmethod
def A__ (cls , UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase)
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''') == "align":
__UpperCAmelCase =config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase)
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : Dict = '''align_vision_model'''
def __init__(self , UpperCAmelCase = 3 , UpperCAmelCase = 6_0_0 , UpperCAmelCase = 2.0 , UpperCAmelCase = 3.1 , UpperCAmelCase = 8 , UpperCAmelCase = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , UpperCAmelCase = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , UpperCAmelCase = [] , UpperCAmelCase = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase = 0.25 , UpperCAmelCase = "swish" , UpperCAmelCase = 2_5_6_0 , UpperCAmelCase = "mean" , UpperCAmelCase = 0.02 , UpperCAmelCase = 0.001 , UpperCAmelCase = 0.99 , UpperCAmelCase = 0.2 , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
__UpperCAmelCase =num_channels
__UpperCAmelCase =image_size
__UpperCAmelCase =width_coefficient
__UpperCAmelCase =depth_coefficient
__UpperCAmelCase =depth_divisor
__UpperCAmelCase =kernel_sizes
__UpperCAmelCase =in_channels
__UpperCAmelCase =out_channels
__UpperCAmelCase =depthwise_padding
__UpperCAmelCase =strides
__UpperCAmelCase =num_block_repeats
__UpperCAmelCase =expand_ratios
__UpperCAmelCase =squeeze_expansion_ratio
__UpperCAmelCase =hidden_act
__UpperCAmelCase =hidden_dim
__UpperCAmelCase =pooling_type
__UpperCAmelCase =initializer_range
__UpperCAmelCase =batch_norm_eps
__UpperCAmelCase =batch_norm_momentum
__UpperCAmelCase =drop_connect_rate
__UpperCAmelCase =sum(UpperCAmelCase) * 4
@classmethod
def A__ (cls , UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase)
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''') == "align":
__UpperCAmelCase =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase)
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
a_ : int = '''align'''
a_ : Optional[Any] = True
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=6_4_0 , UpperCAmelCase=1.0 , UpperCAmelCase=0.02 , **UpperCAmelCase , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase)
if text_config is None:
__UpperCAmelCase ={}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''')
if vision_config is None:
__UpperCAmelCase ={}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''')
__UpperCAmelCase =AlignTextConfig(**UpperCAmelCase)
__UpperCAmelCase =AlignVisionConfig(**UpperCAmelCase)
__UpperCAmelCase =projection_dim
__UpperCAmelCase =temperature_init_value
__UpperCAmelCase =initializer_range
@classmethod
def A__ (cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase)
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =copy.deepcopy(self.__dict__)
__UpperCAmelCase =self.text_config.to_dict()
__UpperCAmelCase =self.vision_config.to_dict()
__UpperCAmelCase =self.__class__.model_type
return output
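# Minimal composition sketch (illustrative values; the classmethod above
# corresponds to `from_text_vision_configs` in the released API):
#
#   text_cfg = AlignTextConfig(vocab_size=30522)
#   vision_cfg = AlignVisionConfig(image_size=600)
#   config = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert config.to_dict()["text_config"]["vocab_size"] == 30522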
| 142 | 1 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    '''simple docstring'''
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
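# Worked example (classic instance): vl = [60, 100, 120], wt = [10, 20, 30],
# w = 50, n = 3. Sorting by value/weight keeps the order, the prefix weights
# are [10, 30, 60], bisect gives k = 2, and the result is
# 60 + 100 + (50 - 30) * 120 / 30 = 240.0.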
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Tuple = {'configuration_timm_backbone': ['TimmBackboneConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ['TimmBackbone']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 529 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__UpperCAmelCase =get_logger(__name__)
__UpperCAmelCase =r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
            using beam search, or log softmax for each vocabulary token when using beam search.
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class lowerCAmelCase__ :
@add_start_docstrings(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCAmelCase__ :
@add_start_docstrings(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
@add_start_docstrings(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
for processor in self:
A__ = inspect.signature(processor.__call__ ).parameters
if len(UpperCamelCase__ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
f"""{processor.__class__} are passed to the logits processor.""" )
A__ = processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
else:
A__ = processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not (temperature > 0):
raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" )
A__ = temperature
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = scores / self.temperature
return scores
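        # e.g. logits [2.0, 1.0] at temperature 2.0 become [1.0, 0.5], which
        # flattens the softmax; a temperature below 1.0 sharpens it instead.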
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = -float("Inf" ) , UpperCamelCase__ = 1 ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or (min_tokens_to_keep < 1):
raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
A__ = top_p
A__ = filter_value
A__ = min_tokens_to_keep
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ , A__ = lax.top_k(UpperCamelCase__ , scores.shape[-1] )
A__ = jnp.full_like(UpperCamelCase__ , self.filter_value )
A__ = jax.nn.softmax(UpperCamelCase__ , axis=-1 ).cumsum(axis=-1 )
A__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A__ = jnp.roll(UpperCamelCase__ , 1 )
score_mask |= score_mask.at[:, 0].set(UpperCamelCase__ )
# min tokens to keep
A__ = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCamelCase__ )
A__ = jnp.where(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = jax.lax.sort_key_val(UpperCamelCase__ , UpperCamelCase__ )[-1]
return next_scores
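        # e.g. with sorted probabilities [0.5, 0.3, 0.15, 0.05] and top_p = 0.9
        # the cumulative sums are [0.5, 0.8, 0.95, 1.0]; after the one-position
        # roll the first three tokens survive and the last one is set to
        # `filter_value`.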
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = -float("Inf" ) , UpperCamelCase__ = 1 ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or top_k <= 0:
raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
A__ = max(UpperCamelCase__ , UpperCamelCase__ )
A__ = filter_value
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ , A__ = scores.shape
A__ = jnp.full(batch_size * vocab_size , self.filter_value )
A__ = min(self.top_k , scores.shape[-1] ) # Safety check
A__ , A__ = lax.top_k(UpperCamelCase__ , UpperCamelCase__ )
A__ = jnp.broadcast_to((jnp.arange(UpperCamelCase__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A__ = topk_scores.flatten()
A__ = topk_indices.flatten() + shift
A__ = next_scores_flat.at[topk_indices_flat].set(UpperCamelCase__ )
A__ = next_scores_flat.reshape(UpperCamelCase__ , UpperCamelCase__ )
return next_scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = bos_token_id
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = jnp.full(scores.shape , -float("inf" ) )
A__ = 1 - jnp.bool_(cur_len - 1 )
A__ = jnp.where(UpperCamelCase__ , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCamelCase__ )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = max_length
A__ = eos_token_id
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = jnp.full(scores.shape , -float("inf" ) )
A__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A__ = jnp.where(UpperCamelCase__ , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCamelCase__ )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or min_length < 0:
raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or eos_token_id < 0:
raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
A__ = min_length
A__ = eos_token_id
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A__ = jnp.where(UpperCamelCase__ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCamelCase__ )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = list(UpperCamelCase__ )
A__ = begin_index
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = 1 - jnp.bool_(cur_len - self.begin_index )
A__ = jnp.where(UpperCamelCase__ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCamelCase__ )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = list(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = dict(UpperCamelCase__ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        A__ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
A__ = force_token_array.at[index].set(UpperCamelCase__ )
        A__ = jnp.int32(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
def _force_token(UpperCamelCase__ ):
A__ = scores.shape[0]
A__ = self.force_token_array[generation_idx]
A__ = jnp.ones_like(UpperCamelCase__ , dtype=scores.dtype ) * -float("inf" )
A__ = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A__ = lax.dynamic_update_slice(UpperCamelCase__ , UpperCamelCase__ , (0, current_token) )
return new_scores
A__ = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCamelCase__ ) , lambda: scores , ) , )
return scores
class lowerCAmelCase__ ( UpperCAmelCase_ ):
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = generate_config.eos_token_id
A__ = generate_config.no_timestamps_token_id
A__ = generate_config.no_timestamps_token_id + 1
A__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(UpperCamelCase__ , "max_initial_timestamp_index" ):
A__ = generate_config.max_initial_timestamp_index
else:
A__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A__ = model_config.vocab_size
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
A__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(UpperCamelCase__ , UpperCamelCase__ ):
A__ = jnp.where((cur_len - self.begin_index) >= 1 , UpperCamelCase__ , UpperCamelCase__ )
A__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCamelCase__ , )
A__ = jnp.where((cur_len - self.begin_index) < 2 , UpperCamelCase__ , UpperCamelCase__ )
A__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCamelCase__ , UpperCamelCase__ , )
return jnp.where(
UpperCamelCase__ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCamelCase__ , )
A__ = jax.vmap(UpperCamelCase__ )(UpperCamelCase__ , UpperCamelCase__ )
A__ = jnp.where(cur_len == self.begin_index , UpperCamelCase__ , UpperCamelCase__ )
A__ = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCamelCase__ , )
A__ = self.timestamp_begin + self.max_initial_timestamp_index
A__ = jnp.where(
UpperCamelCase__ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCamelCase__ , )
# if sum of probability over timestamps is above any other token, sample timestamp
A__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 )
def handle_cumulative_probs(UpperCamelCase__ , UpperCamelCase__ ):
A__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCamelCase__ , )
A__ = jax.vmap(UpperCamelCase__ )(UpperCamelCase__ , UpperCamelCase__ )
return scores | 261 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase ={
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 261 | 1 |
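This import-structure pattern keeps the package cheap to import: names are declared up front, and the submodules that define them load only on first attribute access. A stripped-down sketch of the idea using only the standard library (the class and its method names are illustrative, not transformers' actual `_LazyModule`):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes on first access instead of importing everything up front."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> fully qualified module that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache: later lookups bypass __getattr__
        return value

# "json" and "math" only actually import when their attributes are first touched
lazy = LazyModule("lazy_stdlib", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}), lazy.sqrt(2.0))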
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 321 |
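Most branches above apply one convention: a TF kernel stored as (in_features, out_features) is transposed into the (out_features, in_features) layout that torch.nn.Linear expects. A small self-contained check of that convention, independent of the GPTSAN checkpoint (the kernel here is random, standing in for a real TF variable):

import numpy as np
import torch

tf_kernel = np.random.rand(8, 4).astype(np.float32)  # TF Dense kernel layout: (in, out)
x = np.random.rand(2, 8).astype(np.float32)

linear = torch.nn.Linear(8, 4, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.tensor(tf_kernel.transpose([1, 0]).copy()))  # PyTorch layout: (out, in)
    torch_out = linear(torch.tensor(x)).numpy()

# both frameworks now compute the same projection
np.testing.assert_allclose(x @ tf_kernel, torch_out, rtol=1e-5, atol=1e-6)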
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __a :
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ):
'''simple docstring'''
lowerCAmelCase_ = start
lowerCAmelCase_ = end
lowerCAmelCase_ = val
lowerCAmelCase_ = (start + end) // 2
lowerCAmelCase_ = left
lowerCAmelCase_ = right
def __repr__( self ):
'''simple docstring'''
return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class __a :
def __init__( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = collection
lowerCAmelCase_ = function
if self.collection:
lowerCAmelCase_ = self._build_tree(0 , len(UpperCAmelCase ) - 1 )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
self._update_tree(self.root , UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
return self._query_range(self.root , UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(UpperCAmelCase , UpperCAmelCase , self.collection[start] )
lowerCAmelCase_ = (start + end) // 2
lowerCAmelCase_ = self._build_tree(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ = self._build_tree(mid + 1 , UpperCAmelCase )
return SegmentTreeNode(UpperCAmelCase , UpperCAmelCase , self.fn(left.val , right.val ) , UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
if node.start == i and node.end == i:
lowerCAmelCase_ = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCAmelCase , UpperCAmelCase )
else:
self._update_tree(node.right , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ = self.fn(node.left.val , node.right.val )
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCAmelCase , UpperCAmelCase )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCAmelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCAmelCase ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase_ ( self ):
'''simple docstring'''
if self.root is not None:
lowerCAmelCase_ = Queue()
queue.put(self.root )
while not queue.empty():
lowerCAmelCase_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowercase_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 552 | 0 |
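A convenient way to test a tree like this is to compare every inclusive range against a direct fold over the source list. A short sketch, assuming the SegmentTree class above is in scope:

from functools import reduce
from itertools import combinations_with_replacement
import operator

data = [2, 1, 5, 3, 4]
for fn in [operator.add, max, min]:
    tree = SegmentTree(list(data), fn)
    for i, j in combinations_with_replacement(range(len(data)), 2):
        expected = reduce(fn, data[i : j + 1])  # brute-force oracle for the same range
        assert tree.query_range(i, j) == expected, (i, j, fn)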
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 434 |
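The shim works by warning at import time and re-exporting the moved names. A generic sketch of the same pattern with only the standard library (the helper below is illustrative, not diffusers' actual `deprecate`):

import warnings

def warn_moved_module(old_name, new_name, remove_in):
    warnings.warn(
        f"Importing from `{old_name}` is deprecated and will be removed in {remove_in}; "
        f"import from `{new_name}` instead.",
        FutureWarning,
        stacklevel=3,  # point the warning at the user's import statement, not this helper
    )

warn_moved_module("diffusers.pipeline_utils", "diffusers.pipelines.pipeline_utils", "0.22.0")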
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 434 | 1 |
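`offline()` simulates network failure modes for the duration of a block. A minimal sketch of the same trick with plain monkeypatching (this reimplements the idea only; datasets' actual helper also distinguishes hangs from hard failures):

import contextlib
from unittest.mock import patch

import requests

@contextlib.contextmanager
def offline_connection_fails():
    # make every HTTP call inside the block fail immediately, as if the host were unreachable
    def _raise(*args, **kwargs):
        raise requests.exceptions.ConnectionError("simulated offline mode")

    with patch("requests.Session.request", _raise):
        yield

with offline_connection_fails():
    try:
        requests.get("https://huggingface.co")
    except requests.exceptions.ConnectionError as err:
        print("caught:", err)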
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] in O(log n) per update."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the max of [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt) | 113 |
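Because `update` assigns one value across a whole range, a plain list is an easy oracle. A quick consistency check, assuming the lazy SegmentTree above is in scope:

naive = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
segt = SegmentTree(len(naive))
segt.build(1, 1, len(naive), naive)

# mirror the 1-indexed range assignment [1, 3] <- 111 on the plain list
for k in range(0, 3):
    naive[k] = 111
segt.update(1, 1, len(naive), 1, 3, 111)

assert segt.query(1, 1, len(naive), 1, 15) == max(naive)
assert segt.query(1, 1, len(naive), 4, 6) == max(naive[3:6])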
def merge_sort(collection: list) -> list:
    """Sort by repeatedly pulling the min to the front and the max to the back."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",") | 113 | 1 |
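Despite the name, this is a double-ended selection sort: every pass costs O(n) to find the min and max plus O(n) for the removals, so the whole routine is O(n^2), not O(n log n). A quick property test against the built-in sort:

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert merge_sort(list(data)) == sorted(data)  # pass a copy, since merge_sort mutates its input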
cache = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 700 |
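Since the whole state is the argument triple (days, absent, late), the handwritten cache can also be expressed with functools.lru_cache. A sketch of the same recurrence:

from functools import lru_cache

@lru_cache(maxsize=None)
def prize_strings(days: int, absent: int = 0, late: int = 0) -> int:
    if late == 3 or absent == 2:  # string is disqualified
        return 0
    if days == 0:  # survived every day: one valid prize string
        return 1
    return (
        prize_strings(days - 1, absent, late + 1)  # late today
        + prize_strings(days - 1, absent + 1, 0)  # absent today
        + prize_strings(days - 1, absent, 0)  # on time today
    )

assert prize_strings(4) == 43  # the 4-day count quoted in Project Euler 191
print(prize_strings(30))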
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """ImageGPT image processor: resize, normalize to [-1, 1], then color-quantize pixels to palette ids."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 170 | 0 |
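color_quantize maps each RGB pixel to the index of its nearest palette entry by squared Euclidean distance, computed via the expansion |a - b|^2 = |a|^2 - 2ab + |b|^2. A self-contained numpy sketch with a made-up three-color palette:

import numpy as np

clusters = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [1.0, 0.0, 0.0]])  # toy palette: black, white, red
image = np.array([[[0.1, 0.0, 0.1], [0.9, 0.9, 1.0]],
                  [[0.9, 0.1, 0.0], [0.0, 0.0, 0.0]]])  # shape (2, 2, 3)

pixels = image.reshape(-1, 3)
d = (
    np.sum(np.square(pixels), axis=1)[:, None]
    - 2 * pixels @ clusters.T
    + np.sum(np.square(clusters), axis=1)[None, :]
)
ids = np.argmin(d, axis=1).reshape(image.shape[:-1])
print(ids)  # [[0 1] [2 0]] -- one palette index per pixel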
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 417 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4)) | 31 | 0 |
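_check_zero_mean_unit_variance encodes the invariant these padding tests rely on: after normalization, the valid portion of each sequence has mean close to 0 and variance close to 1. The same check in isolation:

import numpy as np

raw = np.random.rand(1000) * 3 + 7  # arbitrary offset and scale
normalized = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)

assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1) < 1e-3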
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")

    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )

        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
return dataloader
def __magic_name__ (self ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def __magic_name__ (self ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
add_generic_args(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
parser.add_argument(
"""--max_source_length""" , default=10_24 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_42 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_42 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--max_tokens_per_batch""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--logger_name""" , type=SCREAMING_SNAKE_CASE__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=SCREAMING_SNAKE_CASE__ , default=5_00 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=SCREAMING_SNAKE_CASE__ , default="""summarization""" , required=SCREAMING_SNAKE_CASE__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=SCREAMING_SNAKE_CASE__ , default=0.0 , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--src_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--tgt_lang""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument("""--eval_beams""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ )
parser.add_argument(
"""--val_metric""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=SCREAMING_SNAKE_CASE__ , default=1 , required=SCREAMING_SNAKE_CASE__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = '''translation'''
__UpperCamelCase : Optional[Any] = ['''loss''']
__UpperCamelCase : Optional[int] = ['''bleu''']
__UpperCamelCase : Tuple = '''bleu'''
def __init__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hparams.src_lang
SCREAMING_SNAKE_CASE__ : int = hparams.tgt_lang
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> dict:
"""simple docstring"""
return calculate_bleu(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( _snake_case ,_snake_case=None ):
Path(args.output_dir ).mkdir(exist_ok=_snake_case )
check_output_dir(_snake_case ,expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE__ : SummarizationModule = SummarizationModule(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : SummarizationModule = TranslationModule(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
SCREAMING_SNAKE_CASE__ : List[Any] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ : Optional[int] = os.environ.get("""WANDB_PROJECT""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WandbLogger(name=model.output_dir.name ,project=_snake_case )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ : Tuple = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE__ : List[str] = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Any = args.val_metric == """loss"""
SCREAMING_SNAKE_CASE__ : pl.Trainer = generic_train(
_snake_case ,_snake_case ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,_snake_case ) ,early_stopping_callback=_snake_case ,logger=_snake_case ,)
pickle_save(model.hparams ,model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE__ : List[str] = """"""
SCREAMING_SNAKE_CASE__ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir ,"""*.ckpt""" ) ,recursive=_snake_case ) )
if checkpoints:
SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoints[-1]
SCREAMING_SNAKE_CASE__ : Any = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
UpperCAmelCase__ : Optional[int] = pl.Trainer.add_argparse_args(parser)
UpperCAmelCase__ : List[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase__ : Any = parser.parse_args()
main(args)
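# ----------------------------------------------------------------------------
# Illustrative aside, not part of the script above: when --label_smoothing > 0,
# the `_step` method calls an imported `label_smoothed_nll_loss` helper. The
# sketch below shows the common fairseq-style formulation of that loss,
# assuming the inputs are log-probabilities; the imported helper may differ in
# details, and `label_smoothed_nll_loss_sketch` is an illustrative name.
import torch


def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # gold-token term
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform-smoothing term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss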
| 717 |
"""simple docstring"""
from math import pi, sqrt, tan
def lowercase_ ( _snake_case ):
if side_length < 0:
raise ValueError("""surface_area_cube() only accepts non-negative values""" )
return 6 * side_length**2
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowercase_ ( _snake_case ):
if radius < 0:
raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
return 4 * pi * radius**2
def lowercase_ ( _snake_case ):
if radius < 0:
raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
return 3 * pi * radius**2
def lowercase_ ( _snake_case ,_snake_case ):
if radius < 0 or height < 0:
raise ValueError("""surface_area_cone() only accepts non-negative values""" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"""surface_area_conical_frustum() only accepts non-negative values""" )
SCREAMING_SNAKE_CASE__ : int = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowercase_ ( _snake_case ,_snake_case ):
if radius < 0 or height < 0:
raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
return 2 * pi * radius * (height + radius)
def lowercase_ ( _snake_case ,_snake_case ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError("""surface_area_torus() only accepts non-negative values""" )
if torus_radius < tube_radius:
raise ValueError(
"""surface_area_torus() does not support spindle or self intersecting tori""" )
return 4 * pow(_snake_case ,2 ) * torus_radius * tube_radius
def lowercase_ ( _snake_case ,_snake_case ):
if length < 0 or width < 0:
raise ValueError("""area_rectangle() only accepts non-negative values""" )
return length * width
def lowercase_ ( _snake_case ):
if side_length < 0:
raise ValueError("""area_square() only accepts non-negative values""" )
return side_length**2
def lowercase_ ( _snake_case ,_snake_case ):
if base < 0 or height < 0:
raise ValueError("""area_triangle() only accepts non-negative values""" )
return (base * height) / 2
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("""Given three sides do not form a triangle""" )
SCREAMING_SNAKE_CASE__ : List[str] = (sidea + sidea + sidea) / 2
SCREAMING_SNAKE_CASE__ : List[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowercase_ ( _snake_case ,_snake_case ):
if base < 0 or height < 0:
raise ValueError("""area_parallelogram() only accepts non-negative values""" )
return base * height
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError("""area_trapezium() only accepts non-negative values""" )
return 1 / 2 * (basea + basea) * height
def lowercase_ ( _snake_case ):
if radius < 0:
raise ValueError("""area_circle() only accepts non-negative values""" )
return pi * radius**2
def lowercase_ ( _snake_case ,_snake_case ):
if radius_x < 0 or radius_y < 0:
raise ValueError("""area_ellipse() only accepts non-negative values""" )
return pi * radius_x * radius_y
def lowercase_ ( _snake_case ,_snake_case ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("""area_rhombus() only accepts non-negative values""" )
return 1 / 2 * diagonal_a * diagonal_a
def lowercase_ ( _snake_case ,_snake_case ):
if not isinstance(_snake_case ,_snake_case ) or sides < 3:
raise ValueError(
"""area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
elif length < 0:
raise ValueError(
"""area_reg_polygon() only accepts non-negative values as \
length of a side""" )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(f"""Square: {area_square(1_0) = }""")
print(f"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(f"""Circle: {area_circle(2_0) = }""")
print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(2_0) = }""")
print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(f"""Sphere: {surface_area_sphere(2_0) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(f"""Square: {area_reg_polygon(4, 1_0) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
| 545 | 0 |
def _UpperCAmelCase ( UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
return int((input_a, input_a).count(0 ) == 0 )
def _UpperCAmelCase ( ):
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
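# ----------------------------------------------------------------------------
# Runnable restatement of the AND gate above: the obfuscation renamed the
# function to `_UpperCAmelCase` and collapsed its two parameter names, so the
# calls to `and_gate` in the snippet cannot resolve. Same truth-table logic.
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


assert and_gate(0, 0) == 0
assert and_gate(0, 1) == 0
assert and_gate(1, 0) == 0
assert and_gate(1, 1) == 1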
| 611 |
import copy
import os
import cv2 as cva  # alias preserved so the cva.* calls in this snippet run unchanged
import numpy as np
from matplotlib import pyplot as plt
class a :
def __init__( self : str ):
"""simple docstring"""
__lowerCAmelCase = ""
__lowerCAmelCase = ""
__lowerCAmelCase = []
__lowerCAmelCase = 0
__lowerCAmelCase = 256
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
def UpperCAmelCase__ ( self : Optional[int] , snake_case__ : List[str] ):
"""simple docstring"""
__lowerCAmelCase = cva.imread(snake_case__ , 0 )
__lowerCAmelCase = copy.deepcopy(self.img )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
__lowerCAmelCase = np.sum(snake_case__ )
for i in range(len(snake_case__ ) ):
__lowerCAmelCase = x[i] / self.k
self.sk += prk
__lowerCAmelCase = (self.L - 1) * self.sk
if self.rem != 0:
__lowerCAmelCase = last % 1  # fractional part of last, used by the rounding below
__lowerCAmelCase = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(snake_case__ )
__lowerCAmelCase = int(np.ma.count(self.img ) / self.img[1].size )
__lowerCAmelCase = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
__lowerCAmelCase = self.img[j][i]
if num != self.last_list[num]:
__lowerCAmelCase = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 611 | 1 |
'''simple docstring'''
lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 701 |
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowercase = input('''Enter image url: ''').strip()
print(F"""Downloading image from {url} ...""")
lowercase = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowercase = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowercase = requests.get(image_url).content
lowercase = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 564 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ = None ) -> None:
"""simple docstring"""
if components is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
def __len__(self ) -> int:
"""simple docstring"""
return len(self.__components )
def __str__(self ) -> str:
"""simple docstring"""
return "(" + ",".join(map(SCREAMING_SNAKE_CASE__ , self.__components ) ) + ")"
def __add__(self , SCREAMING_SNAKE_CASE__ ) -> Vector:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = len(self )
if size == len(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Any = [self.__components[i] + other.component(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
else:
raise Exception("""must have the same size""" )
def __sub__(self , SCREAMING_SNAKE_CASE__ ) -> Vector:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = len(self )
if size == len(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : int = [self.__components[i] - other.component(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> Vector:
"""simple docstring"""
...
@overload
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> float:
"""simple docstring"""
...
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> float | Vector:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , (float, int) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [c * other for c in self.__components]
return Vector(SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(self ) == len(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Tuple = len(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.__components[i] * other.component(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ )]
return sum(SCREAMING_SNAKE_CASE__ )
else: # error case
raise Exception("""invalid operand!""" )
def __magic_name__ (self ) -> Vector:
"""simple docstring"""
return Vector(self.__components )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> float:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
SCREAMING_SNAKE_CASE__ : str = value
def __magic_name__ (self ) -> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [c**2 for c in self.__components]
return math.sqrt(sum(SCREAMING_SNAKE_CASE__ ) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> float:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self * other
SCREAMING_SNAKE_CASE__ : Any = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case )
return Vector([0] * dimension )
def lowercase_ ( _snake_case ,_snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (isinstance(_snake_case ,_snake_case ))
SCREAMING_SNAKE_CASE__ : Tuple = [0] * dimension
SCREAMING_SNAKE_CASE__ : List[str] = 1
return Vector(_snake_case )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
assert (
isinstance(_snake_case ,_snake_case )
and isinstance(_snake_case ,_snake_case )
and (isinstance(_snake_case ,(int, float) ))
)
return x * scalar + y
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
random.seed(_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [random.randint(_snake_case ,_snake_case ) for _ in range(_snake_case )]
return Vector(_snake_case )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = matrix
SCREAMING_SNAKE_CASE__ : int = w
SCREAMING_SNAKE_CASE__ : Any = h
def __str__(self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE__ : Any = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE__ )
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE__ : List[Any] = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE__ : Dict = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE__ )
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> Matrix:
"""simple docstring"""
...
@overload
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> Vector:
"""simple docstring"""
...
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> Vector | Matrix:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # matrix-vector
if len(SCREAMING_SNAKE_CASE__ ) == self.__width:
SCREAMING_SNAKE_CASE__ : Dict = zero_vector(self.__height )
for i in range(self.__height ):
SCREAMING_SNAKE_CASE__ : Tuple = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE__ )
for j in range(self.__width )
]
ans.change_component(SCREAMING_SNAKE_CASE__ , sum(SCREAMING_SNAKE_CASE__ ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): # matrix-scalar
SCREAMING_SNAKE_CASE__ : int = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width , self.__height )
return None
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.__height
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.__width
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
SCREAMING_SNAKE_CASE__ : Optional[int] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE__ , self.__width - 1 , self.__height - 1 ).determinant()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
raise Exception("""Indices out of bounds""" )
def __magic_name__ (self ) -> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
SCREAMING_SNAKE_CASE__ : str = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE__ ) for y in range(self.__width )
]
return sum(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : list[list[float]] = [[0] * n for _ in range(_snake_case )]
return Matrix(_snake_case ,_snake_case ,_snake_case )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
random.seed(_snake_case )
SCREAMING_SNAKE_CASE__ : list[list[float]] = [
[random.randint(_snake_case ,_snake_case ) for _ in range(_snake_case )] for _ in range(_snake_case )
]
return Matrix(_snake_case ,_snake_case ,_snake_case )
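# ----------------------------------------------------------------------------
# Standalone sketch of the angle computation that the final Vector method above
# implements. The class names in this snippet are obfuscated, so the example
# restates the dot-product identity with plain lists instead of calling them.
import math


def angle_between(u, v, deg=False):
    dot = sum(a * b for a, b in zip(u, v))
    norm = math.sqrt(sum(a * a for a in u)) * math.sqrt(sum(b * b for b in v))
    theta = math.acos(dot / norm)
    return math.degrees(theta) if deg else theta


print(angle_between([1.0, 0.0], [0.0, 1.0], deg=True))  # 90.0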
| 223 |
"""simple docstring"""
from pathlib import Path
import cv2 as cva  # alias preserved so the cva.* calls in this snippet run unchanged
import numpy as np
from matplotlib import pyplot as plt
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = cva.getAffineTransform(_snake_case ,_snake_case )
return cva.warpAffine(_snake_case ,_snake_case ,(rows, cols) )
if __name__ == "__main__":
# read original image
UpperCAmelCase__ : str = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
UpperCAmelCase__ : List[str] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
UpperCAmelCase__ , UpperCAmelCase__ : str = gray_img.shape
# set different points to rotate image
UpperCAmelCase__ : List[Any] = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
UpperCAmelCase__ : Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
UpperCAmelCase__ : List[str] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
UpperCAmelCase__ : str = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
UpperCAmelCase__ : List[Any] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
UpperCAmelCase__ : List[Any] = plt.figure(1)
UpperCAmelCase__ : Optional[int] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 223 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __A ( unittest.TestCase ):
def __init__( self :str , __snake_case :Union[str, Any] , __snake_case :Tuple=7 , __snake_case :Optional[Any]=3 , __snake_case :Dict=30 , __snake_case :int=4_00 , __snake_case :Tuple=True , __snake_case :int=None , __snake_case :str=True , __snake_case :Dict=1 / 2_55 , __snake_case :Tuple=True , __snake_case :Tuple=[0.5, 0.5, 0.5] , __snake_case :List[Any]=[0.5, 0.5, 0.5] , __snake_case :Optional[Any]=True , ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
__magic_name__ : Optional[Any] =parent
__magic_name__ : List[str] =batch_size
__magic_name__ : Tuple =num_channels
__magic_name__ : Tuple =min_resolution
__magic_name__ : Optional[Any] =max_resolution
__magic_name__ : Optional[Any] =do_resize
__magic_name__ : Tuple =size
__magic_name__ : Dict =do_rescale
__magic_name__ : List[str] =rescale_factor
__magic_name__ : Union[str, Any] =do_normalize
__magic_name__ : str =image_mean
__magic_name__ : int =image_std
__magic_name__ : str =do_pad
def A__ ( self :Optional[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def A__ ( self :Optional[int] , __snake_case :Tuple , __snake_case :str=False ):
'''simple docstring'''
if not batched:
__magic_name__ : int =image_inputs[0]
if isinstance(__snake_case , Image.Image ):
__magic_name__ , __magic_name__ : List[str] =image.size
else:
__magic_name__ , __magic_name__ : Optional[Any] =image.shape[1], image.shape[2]
if w < h:
__magic_name__ : str =int(self.size["""shortest_edge"""] * h / w )
__magic_name__ : Dict =self.size["""shortest_edge"""]
elif w > h:
__magic_name__ : List[str] =self.size["""shortest_edge"""]
__magic_name__ : str =int(self.size["""shortest_edge"""] * w / h )
else:
__magic_name__ : int =self.size["""shortest_edge"""]
__magic_name__ : Dict =self.size["""shortest_edge"""]
else:
__magic_name__ : Tuple =[]
for image in image_inputs:
__magic_name__ , __magic_name__ : Optional[int] =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__magic_name__ : Optional[int] =max(__snake_case , key=lambda __snake_case : item[0] )[0]
__magic_name__ : Optional[Any] =max(__snake_case , key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __A ( UpperCamelCase__ , unittest.TestCase ):
UpperCamelCase = DetrImageProcessor if is_vision_available() else None
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : List[str] =DetrImageProcessingTester(self )
@property
def A__ ( self :Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """image_mean""" ) )
self.assertTrue(hasattr(__snake_case , """image_std""" ) )
self.assertTrue(hasattr(__snake_case , """do_normalize""" ) )
self.assertTrue(hasattr(__snake_case , """do_rescale""" ) )
self.assertTrue(hasattr(__snake_case , """rescale_factor""" ) )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """do_pad""" ) )
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Dict =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , __snake_case )
__magic_name__ : List[Any] =self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__snake_case )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , __snake_case )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
pass
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : int =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__magic_name__ : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
__magic_name__ : List[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : List[str] =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ , __magic_name__ : Any =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
__magic_name__ : List[Any] =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self :str ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__magic_name__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
__magic_name__ : Optional[Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Any =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Any =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Optional[int] =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__magic_name__ : Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
__magic_name__ : Optional[int] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : List[Any] =self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__magic_name__ : Dict =image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
__magic_name__ , __magic_name__ : Any =self.image_processor_tester.get_expected_values(__snake_case , batched=__snake_case )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Tuple =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__magic_name__ : List[str] =json.loads(f.read() )
__magic_name__ : int ={"""image_id""": 3_97_69, """annotations""": target}
# encode them
__magic_name__ : List[str] =DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__magic_name__ : Optional[int] =image_processing(images=__snake_case , annotations=__snake_case , return_tensors="""pt""" )
# verify pixel values
__magic_name__ : Any =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , __snake_case )
__magic_name__ : Optional[int] =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__magic_name__ : Tuple =torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __snake_case ) )
# verify boxes
__magic_name__ : List[Any] =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __snake_case )
__magic_name__ : Dict =torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__magic_name__ : List[str] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __snake_case ) )
# verify is_crowd
__magic_name__ : int =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __snake_case ) )
# verify class_labels
__magic_name__ : List[str] =torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __snake_case ) )
# verify orig_size
__magic_name__ : Union[str, Any] =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __snake_case ) )
# verify size
__magic_name__ : Optional[Any] =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __snake_case ) )
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__magic_name__ : Dict =json.loads(f.read() )
__magic_name__ : Tuple ={"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
__magic_name__ : str =pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__magic_name__ : Optional[int] =DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__magic_name__ : List[Any] =image_processing(images=__snake_case , annotations=__snake_case , masks_path=__snake_case , return_tensors="""pt""" )
# verify pixel values
__magic_name__ : Union[str, Any] =torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , __snake_case )
__magic_name__ : Optional[int] =torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __snake_case , atol=1E-4 ) )
# verify area
__magic_name__ : List[Any] =torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __snake_case ) )
# verify boxes
__magic_name__ : Optional[Any] =torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __snake_case )
__magic_name__ : Optional[Any] =torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __snake_case , atol=1E-3 ) )
# verify image_id
__magic_name__ : Optional[Any] =torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __snake_case ) )
# verify is_crowd
__magic_name__ : List[str] =torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __snake_case ) )
# verify class_labels
__magic_name__ : Dict =torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __snake_case ) )
# verify masks
__magic_name__ : Tuple =82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __snake_case )
# verify orig_size
__magic_name__ : Any =torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __snake_case ) )
# verify size
__magic_name__ : List[Any] =torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __snake_case ) )
| 367 |
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[int] =[]
__magic_name__ : int =[]
__magic_name__ : str ={
"""^""": 3,
"""*""": 2,
"""/""": 2,
"""%""": 2,
"""+""": 1,
"""-""": 1,
} # Priority of each operator
__magic_name__ : Dict =len(lowerCamelCase ) if (len(lowerCamelCase ) > 7) else 7
# Print table header for output
print(
"""Symbol""".center(8 ) , """Stack""".center(lowerCamelCase ) , """Postfix""".center(lowerCamelCase ) , sep=""" | """ , )
print("""-""" * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(lowerCamelCase ) == 0:
stack.append(lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(lowerCamelCase ) # push x to stack
print(
x.center(8 ) , ("""""".join(lowerCamelCase )).ljust(lowerCamelCase ) , ("""""".join(lowerCamelCase )).ljust(lowerCamelCase ) , sep=""" | """ , ) # Output in tabular format
while len(lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
""" """.center(8 ) , ("""""".join(lowerCamelCase )).ljust(lowerCamelCase ) , ("""""".join(lowerCamelCase )).ljust(lowerCamelCase ) , sep=""" | """ , ) # Output in tabular format
return "".join(lowerCamelCase ) # return Postfix as str
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Union[str, Any] =list(infix[::-1] ) # reverse the infix equation
for i in range(len(lowerCamelCase ) ):
if infix[i] == "(":
__magic_name__ : str =""")""" # change "(" to ")"
elif infix[i] == ")":
__magic_name__ : int ="""(""" # change ")" to "("
return (infix_2_postfix("""""".join(lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
UpperCAmelCase_ : Dict = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 367 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ : int = logging.get_logger(__name__)
def a_ ( __snake_case : List[Any] , __snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =[]
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> List[Any]:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowerCamelCase_ =state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase_ =in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCamelCase_ =in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCamelCase_ =in_proj_weight[
-encoder_config.hidden_size :, :
]
def a_ ( __snake_case : Any , __snake_case : Dict , __snake_case : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =dct.pop(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =val
def a_ ( __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
if "handwritten" in checkpoint_url:
lowerCamelCase_ ='https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase_ ='https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
lowerCamelCase_ =Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def a_ ( __snake_case : Tuple , __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =ViTConfig(image_size=384 , qkv_bias=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCamelCase_ =768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCamelCase_ =1024
lowerCamelCase_ =4096
lowerCamelCase_ =24
lowerCamelCase_ =16
lowerCamelCase_ =1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase_ =False
lowerCamelCase_ ='relu'
lowerCamelCase_ =1024
lowerCamelCase_ =True
lowerCamelCase_ =False
lowerCamelCase_ =False
# load HuggingFace model
lowerCamelCase_ =ViTModel(__SCREAMING_SNAKE_CASE , add_pooling_layer=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =TrOCRForCausalLM(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =VisionEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
model.eval()
# load state_dict of original model, rename some keys
lowerCamelCase_ =torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='''cpu''' , check_hash=__SCREAMING_SNAKE_CASE )['model']
lowerCamelCase_ =create_rename_keys(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCamelCase_ =state_dict.pop(__SCREAMING_SNAKE_CASE )
if key.startswith('''decoder''' ) and "output_projection" not in key:
lowerCamelCase_ =val
else:
lowerCamelCase_ =val
# load state dict
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# Check outputs on an image
lowerCamelCase_ =ViTImageProcessor(size=encoder_config.image_size )
lowerCamelCase_ =RobertaTokenizer.from_pretrained('''roberta-large''' )
lowerCamelCase_ =TrOCRProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ =processor(images=prepare_img(__SCREAMING_SNAKE_CASE ) , return_tensors='''pt''' ).pixel_values
# verify logits
lowerCamelCase_ =torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowerCamelCase_ =model(pixel_values=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =outputs.logits
lowerCamelCase_ =torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , __SCREAMING_SNAKE_CASE , atol=1e-3 ), "First elements of logits not as expected"
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a_ : int = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
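# ----------------------------------------------------------------------------
# Illustrative follow-up, not part of the conversion script: once the script
# has written a folder, the converted model can run OCR like this.
# "converted_trocr" and "receipt.jpg" are placeholder paths, not outputs the
# script produces by default.
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("converted_trocr")
model = VisionEncoderDecoderModel.from_pretrained("converted_trocr")

image = Image.open("receipt.jpg").convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])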
| 676 |
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> bool:
UpperCamelCase__ : set[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
UpperCamelCase__ : set[int] = set()
return any(
node not in visited and depth_first_search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for node in graph )
def _lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool:
visited.add(__SCREAMING_SNAKE_CASE )
rec_stk.add(__SCREAMING_SNAKE_CASE )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__SCREAMING_SNAKE_CASE )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
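# ----------------------------------------------------------------------------
# Self-contained restatement of the back-edge idea above: the snippet's
# obfuscation gives both functions the same name (`_lowercase`), so they cannot
# be called as written. Same algorithm, runnable names.
def has_cycle(graph):
    visited, rec_stack = set(), set()

    def dfs(vertex):
        visited.add(vertex)
        rec_stack.add(vertex)
        for node in graph[vertex]:
            if node not in visited:
                if dfs(node):
                    return True
            elif node in rec_stack:  # back edge found => cycle
                return True
        rec_stack.remove(vertex)
        return False

    return any(vertex not in visited and dfs(vertex) for vertex in graph)


print(has_cycle({0: [1], 1: [2], 2: [0]}))  # True
print(has_cycle({0: [1], 1: [2], 2: []}))   # False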
| 410 | 0 |
"""simple docstring"""
def move_tower( height , from_pole , to_pole , with_pole ) -> None:
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole , to_pole ) -> None:
    print("""moving disk from""" , from_pole , """to""" , to_pole )
def main() -> None:
    height = int(input("""Height of hanoi: """ ).strip() )
    move_tower(height , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
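# Solving a tower of height n always takes 2**n - 1 disk moves (height 3 -> 7 moves).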
| 717 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph ) -> bool:
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
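# The sample graph is a 4-cycle plus an isolated vertex, so it is bipartite
# ({0, 2} vs {1, 3}) and the call prints True; an odd cycle such as
# {0: [1, 2], 1: [0, 2], 2: [0, 1]} would print False.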
| 533 | 0 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Any = VideoMAEConfig()
set_architecture_configs(UpperCamelCase__ , UpperCamelCase__ )
if "finetuned" not in model_name:
_a : Dict = False
if "finetuned" in model_name:
_a : Union[str, Any] = """huggingface/label-files"""
if "kinetics" in model_name:
_a : Dict = 4_0_0
_a : str = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
_a : Union[str, Any] = 1_7_4
_a : int = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
_a : str = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_a : Optional[int] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_a : Union[str, Any] = idalabel
_a : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if "small" in model_name:
_a : List[str] = 3_8_4
_a : List[str] = 1_5_3_6
_a : int = 1_2
_a : Optional[Any] = 1_6
_a : Union[str, Any] = 1_2
_a : int = 3
_a : int = 1_9_2
_a : List[Any] = 7_6_8
elif "large" in model_name:
_a : Dict = 1_0_2_4
_a : int = 4_0_9_6
_a : Dict = 2_4
_a : Any = 1_6
_a : Dict = 1_2
_a : int = 8
_a : List[Any] = 5_1_2
_a : str = 2_0_4_8
elif "huge" in model_name:
_a : Union[str, Any] = 1_2_8_0
_a : Optional[Any] = 5_1_2_0
_a : str = 3_2
_a : Optional[Any] = 1_6
_a : Optional[Any] = 1_2
_a : int = 8
_a : List[Any] = 6_4_0
_a : Optional[Any] = 2_5_6_0
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
if "encoder." in name:
_a : Optional[int] = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
_a : Union[str, Any] = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
_a : Optional[int] = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
_a : Any = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
_a : Optional[int] = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
_a : Dict = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
_a : Dict = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
_a : Dict = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
_a : Union[str, Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
_a : Optional[int] = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
_a : Optional[Any] = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
_a : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
_a : int = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
_a : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
_a : int = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
_a : Any = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
_a : Optional[int] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
_a : str = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
_a : Dict = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
_a : Tuple = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
_a : Dict = name.replace("""head""" , """classifier""" )
return name
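# Illustrative renames produced by the function above:
#   "blocks.0.attn.proj.weight" -> "videomae.encoder.layer.0.attention.output.dense.weight"
#   "patch_embed.proj.bias"     -> "videomae.embeddings.patch_embeddings.projection.bias"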
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_a : Optional[Any] = orig_state_dict.pop(UpperCamelCase__ )
if key.startswith("""encoder.""" ):
_a : Optional[int] = key.replace("""encoder.""" , """""" )
if "qkv" in key:
_a : List[str] = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
_a : Dict = config.decoder_hidden_size
_a : Union[str, Any] = int(key_split[2] )
_a : Optional[Any] = """decoder.decoder_layers."""
if "weight" in key:
_a : Optional[Any] = val[:dim, :]
_a : List[str] = val[dim : dim * 2, :]
_a : Any = val[-dim:, :]
else:
_a : Tuple = config.hidden_size
_a : str = int(key_split[1] )
_a : Dict = """videomae.encoder.layer."""
if "weight" in key:
_a : Any = val[:dim, :]
_a : int = val[dim : dim * 2, :]
_a : str = val[-dim:, :]
else:
_a : List[Any] = val
return orig_state_dict
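# The original checkpoints store query/key/value as one fused "qkv" matrix of
# shape (3 * dim, dim); the slicing above splits it back into the separate
# query/key/value projections expected by the HuggingFace implementation.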
def lowerCAmelCase__ ( ):
'''simple docstring'''
_a : Dict = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
_a : List[Any] = np.load(UpperCamelCase__ )
return list(UpperCamelCase__ )
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : List[str] = get_videomae_config(UpperCamelCase__ )
if "finetuned" in model_name:
_a : List[Any] = VideoMAEForVideoClassification(UpperCamelCase__ )
else:
_a : str = VideoMAEForPreTraining(UpperCamelCase__ )
# download original checkpoint, hosted on Google Drive
_a : List[str] = """pytorch_model.bin"""
gdown.cached_download(UpperCamelCase__ , UpperCamelCase__ , quiet=UpperCamelCase__ )
_a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" )
if "model" in files:
_a : Tuple = files["""model"""]
else:
_a : int = files["""module"""]
_a : int = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# verify model on basic input
_a : str = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
_a : int = prepare_video()
_a : Optional[int] = image_processor(UpperCamelCase__ , return_tensors="""pt""" )
if "finetuned" not in model_name:
_a : Dict = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
_a : Union[str, Any] = torch.load(UpperCamelCase__ )
_a : List[Any] = model(**UpperCamelCase__ )
_a : Any = outputs.logits
_a : Dict = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
_a : Any = torch.Size([1, 4_0_0] )
_a : Optional[Any] = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
_a : int = torch.Size([1, 1_7_4] )
_a : Any = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
_a : List[Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_a : str = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
_a : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_a : Union[str, Any] = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
_a : Dict = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
_a : List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_a : List[str] = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
_a : List[Any] = torch.Size([1, 4_0_0] )
_a : str = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
_a : Optional[Any] = torch.Size([1, 4_0_0] )
_a : Dict = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
_a : int = torch.Size([1, 4_0_0] )
_a : Union[str, Any] = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
_a : str = torch.Size([1, 4_0_0] )
_a : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
_a : List[Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_a : Optional[Any] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
_a : Optional[Any] = torch.Size([1, 1_7_4] )
_a : Any = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
_a : Optional[Any] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
_a : int = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
_a : str = torch.Size([1, 1_7_4] )
_a : str = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(F"""Model name not supported. Should be one of {model_names}""" )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
_a : Any = outputs.loss
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(UpperCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
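# Example invocation (script name and output path are illustrative):
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base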
| 389 |
"""simple docstring"""
from PIL import Image
def change_brightness( img , level ):
    '''simple docstring'''
    def brightness(c ) -> float:
        return 1_2_8 + level + (c - 1_2_8)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
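# Note that 128 + level + (c - 128) simplifies to c + level: every pixel value
# is shifted by `level`, e.g. brightness(50) with level=100 returns 150.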
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
| 389 | 1 |
import math
def jump_search( arr: list , x: int ) -> int:
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"Number {x} is at index {res}")
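# Jump search requires a sorted list; it probes in blocks of size sqrt(n) and
# then scans linearly within one block, for O(sqrt(n)) comparisons overall,
# e.g. jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55], 55) returns 10.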
| 702 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class snake_case__ :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]="resnet50" , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int=True , ) -> int:
UpperCAmelCase_ = parent
UpperCAmelCase_ = out_indices if out_indices is not None else [4]
UpperCAmelCase_ = stage_names
UpperCAmelCase_ = out_features
UpperCAmelCase_ = backbone
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = use_pretrained_backbone
UpperCAmelCase_ = is_training
def UpperCamelCase ( self : Any ) -> Dict:
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def UpperCamelCase ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
UpperCAmelCase_ = TimmBackbone(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase_ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def UpperCamelCase ( self : Tuple ) -> Any:
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class snake_case__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
__A = (TimmBackbone,) if is_torch_available() else ()
__A = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
__A = False
__A = False
__A = False
__A = False
def UpperCamelCase ( self : str ) -> Optional[Any]:
UpperCAmelCase_ = TimmBackboneModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def UpperCamelCase ( self : str ) -> Dict:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase ( self : Dict ) -> List[str]:
UpperCAmelCase_ = '''resnet18'''
UpperCAmelCase_ = '''microsoft/resnet-18'''
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ )
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ , use_timm_backbone=lowerCAmelCase_ , out_indices=[1, 2, 3] )
UpperCAmelCase_ = AutoBackbone.from_pretrained(lowerCAmelCase_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def UpperCamelCase ( self : List[Any] ) -> List[str]:
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def UpperCamelCase ( self : str ) -> str:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def UpperCamelCase ( self : Any ) -> str:
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def UpperCamelCase ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def UpperCamelCase ( self : Tuple ) -> List[str]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def UpperCamelCase ( self : Tuple ) -> int:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def UpperCamelCase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def UpperCamelCase ( self : Union[str, Any] ) -> int:
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def UpperCamelCase ( self : List[str] ) -> Any:
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def UpperCamelCase ( self : str ) -> Optional[int]:
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def UpperCamelCase ( self : Any ) -> Tuple:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase ( self : int ) -> int:
pass
def UpperCamelCase ( self : Optional[int] ) -> str:
UpperCAmelCase_, UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCamelCase ( self : int ) -> List[Any]:
UpperCAmelCase_, UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
UpperCAmelCase_ = self.all_model_classes[0]
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = model(**lowerCAmelCase_ )
UpperCAmelCase_ = outputs[0][-1]
# Encoder-/Decoder-only models
UpperCAmelCase_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCAmelCase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCamelCase ( self : List[Any] ) -> Dict:
UpperCAmelCase_, UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(**lowerCAmelCase_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase_ = False
UpperCAmelCase_ = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
UpperCAmelCase_ = model(**lowerCAmelCase_ )
| 407 | 0 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
def __init__( self , A_ = None , A_ = None , A_=None , A_=None )-> Optional[Any]:
'''simple docstring'''
if not conversation_id:
            UpperCamelCase = uuid.uuid4()
if past_user_inputs is None:
UpperCamelCase = []
if generated_responses is None:
UpperCamelCase = []
UpperCamelCase = conversation_id
UpperCamelCase = past_user_inputs
UpperCamelCase = generated_responses
UpperCamelCase = text
def __eq__( self , A_ )-> List[Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase_ ( self , A_ , A_ = False )-> int:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
F'''with: "{text}".''' )
UpperCamelCase = text
else:
logger.warning(
F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
UpperCamelCase = text
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
UpperCamelCase = None
def UpperCAmelCase_ ( self , A_ )-> int:
'''simple docstring'''
self.generated_responses.append(A_ )
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self )-> Any:
'''simple docstring'''
UpperCamelCase = F'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
UpperCamelCase = 'user' if is_user else 'bot'
output += F'''{name} >> {text} \n'''
return output
@add_end_docstrings(
snake_case_ , R"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , *A_ , **A_ )-> Any:
'''simple docstring'''
super().__init__(*A_ , **A_ )
if self.tokenizer.pad_token_id is None:
UpperCamelCase = self.tokenizer.eos_token
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , **A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = {}
UpperCamelCase = {}
UpperCamelCase = {}
if min_length_for_response is not None:
UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
UpperCamelCase = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(A_ )
return preprocess_params, forward_params, postprocess_params
def __call__( self , A_ , A_=0 , **A_ )-> Any:
'''simple docstring'''
UpperCamelCase = super().__call__(A_ , num_workers=A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase_ ( self , A_ , A_=32 )-> Dict[str, Any]:
'''simple docstring'''
if not isinstance(A_ , A_ ):
raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
if conversation.new_user_input is None:
raise ValueError(
F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
'Add user inputs with the conversation\'s `add_user_input` method' )
if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
UpperCamelCase = self.tokenizer._build_conversation_input_ids(A_ )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
UpperCamelCase = self._legacy_parse_and_tokenize(A_ )
if self.framework == "pt":
UpperCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
UpperCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=10 , **A_ )-> Optional[Any]:
'''simple docstring'''
UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
UpperCamelCase = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
UpperCamelCase = max_length - minimum_tokens
UpperCamelCase = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
UpperCamelCase = model_inputs['attention_mask'][:, -trim:]
UpperCamelCase = model_inputs.pop('conversation' )
UpperCamelCase = max_length
UpperCamelCase = self.model.generate(**A_ , **A_ )
if self.model.config.is_encoder_decoder:
UpperCamelCase = 1
else:
UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase_ ( self , A_ , A_=True )-> Tuple:
'''simple docstring'''
UpperCamelCase = model_outputs['output_ids']
UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ , )
UpperCamelCase = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(A_ )
return conversation
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = self.tokenizer.eos_token_id
UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(A_ , add_special_tokens=A_ ) )
if len(A_ ) > self.tokenizer.model_max_length:
UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
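# Minimal usage sketch (model name is illustrative; any conversational
# checkpoint works with this pipeline):
#   from transformers import Conversation, pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Hi, how are you?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])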
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=False , A_=True , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , )-> Dict:
'''simple docstring'''
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = do_resize
UpperCamelCase = size if size is not None else {'height': 18, 'width': 20}
UpperCamelCase = do_thumbnail
UpperCamelCase = do_align_axis
UpperCamelCase = do_pad
UpperCamelCase = do_normalize
UpperCamelCase = image_mean
UpperCamelCase = image_std
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( snake_case_ , unittest.TestCase):
lowerCAmelCase_ = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
UpperCamelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self )-> str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(A_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase_ ( self )-> Tuple:
'''simple docstring'''
pass
@is_flaky()
def UpperCAmelCase_ ( self )-> Any:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase_ ( self )-> Dict:
'''simple docstring'''
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 3 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def a (_lowerCAmelCase ):
# initialize config
if "resnet-50" in model_name:
SCREAMING_SNAKE_CASE_ = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
SCREAMING_SNAKE_CASE_ = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
        raise ValueError('''Model name should include either resnet-50 or resnet-101''' )
SCREAMING_SNAKE_CASE_ = DetrConfig(use_timm_backbone=_lowerCAmelCase , backbone_config=_lowerCAmelCase )
# set label attributes
SCREAMING_SNAKE_CASE_ = '''panoptic''' in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE_ = 2_5_0
else:
SCREAMING_SNAKE_CASE_ = 9_1
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''coco-detection-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def a (_lowerCAmelCase ):
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_ = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = state_dict.pop(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = val
def a (_lowerCAmelCase , _lowerCAmelCase=False ):
SCREAMING_SNAKE_CASE_ = ''''''
if is_panoptic:
SCREAMING_SNAKE_CASE_ = '''detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE_ = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE_ = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE_ = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE_ = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE_ = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE_ = state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[:2_5_6, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[:2_5_6]
SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE_ = in_proj_weight_cross_attn[-2_5_6:, :]
SCREAMING_SNAKE_CASE_ = in_proj_bias_cross_attn[-2_5_6:]
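# PyTorch's nn.MultiheadAttention keeps query/key/value fused in one in_proj
# matrix of shape (3 * 256, 256); the slices above split it into the separate
# q/k/v projections used by the HuggingFace DETR implementation.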
def a ():
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def a (_lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = get_detr_config(_lowerCAmelCase )
# load original model from torch hub
SCREAMING_SNAKE_CASE_ = {
'''detr-resnet-50''': '''detr_resnet50''',
'''detr-resnet-101''': '''detr_resnet101''',
}
logger.info(F"Converting model {model_name}..." )
SCREAMING_SNAKE_CASE_ = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=_lowerCAmelCase ).eval()
SCREAMING_SNAKE_CASE_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowerCAmelCase ):
if is_panoptic:
SCREAMING_SNAKE_CASE_ = '''detr.''' + src
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCAmelCase , is_panoptic=_lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
SCREAMING_SNAKE_CASE_ = '''detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
SCREAMING_SNAKE_CASE_ = state_dict.pop(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
SCREAMING_SNAKE_CASE_ = state_dict.pop(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
SCREAMING_SNAKE_CASE_ = state_dict.pop(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
SCREAMING_SNAKE_CASE_ = state_dict.pop(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE_ = DetrForSegmentation(_lowerCAmelCase ) if is_panoptic else DetrForObjectDetection(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# verify our conversion on an image
SCREAMING_SNAKE_CASE_ = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
SCREAMING_SNAKE_CASE_ = DetrImageProcessor(format=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = processor(images=prepare_img() , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = encoding['''pixel_values''']
SCREAMING_SNAKE_CASE_ = detr(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(_lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('''Uploading PyTorch model and image processor to the hub...''' )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""detr-resnet-50""",
type=str,
choices=["""detr-resnet-50""", """detr-resnet-101"""],
help="""Name of the DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""")
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
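# Example invocation (script name and output path are illustrative):
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50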
| 89 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    url = f"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )["""url"""][0]
    webbrowser.open(link)
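# Example: `python search_and_open.py graph theory` googles the joined query and
# opens the first organic result in the default browser (script name is
# illustrative; Google's result-page CSS classes change often, hence the
# AttributeError fallback above).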
| 89 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase__ (BaseImageProcessor ):
'''simple docstring'''
_UpperCamelCase = ['pixel_values']
def __init__( self ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = PILImageResampling.BILINEAR ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = True ,_lowerCAmelCase = 1 / 2_55 ,_lowerCAmelCase = True ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 2_56}
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,param_name="""crop_size""" )
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = resample
lowerCamelCase__ = do_center_crop
lowerCamelCase__ = crop_size
lowerCamelCase__ = do_rescale
lowerCamelCase__ = rescale_factor
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = PILImageResampling.BICUBIC ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase__ = get_resize_output_image_size(_lowerCAmelCase ,size=size["""shortest_edge"""] ,default_to_square=_lowerCAmelCase )
return resize(_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
lowerCamelCase__ = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(_lowerCAmelCase ,size=(size["""height"""], size["""width"""]) ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ):
return rescale(_lowerCAmelCase ,scale=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,**_lowerCAmelCase ,):
return normalize(_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = ChannelDimension.FIRST ,**_lowerCAmelCase ,):
lowerCamelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ = size if size is not None else self.size
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
lowerCamelCase__ = resample if resample is not None else self.resample
lowerCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ = get_size_dict(_lowerCAmelCase ,param_name="""crop_size""" )
lowerCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ = image_std if image_std is not None else self.image_std
lowerCamelCase__ = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase__ = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ = [self.resize(image=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase__ = [self.center_crop(image=_lowerCAmelCase ,size=_lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ = [self.rescale(image=_lowerCAmelCase ,scale=_lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ = [self.normalize(image=_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ) for image in images]
lowerCamelCase__ = [to_channel_dimension_format(_lowerCAmelCase ,_lowerCAmelCase ) for image in images]
lowerCamelCase__ = {"""pixel_values""": images}
return BatchFeature(data=_lowerCAmelCase ,tensor_type=_lowerCAmelCase )
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert raw model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(target_sizes)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
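

# A minimal usage sketch (added for illustration; "image.jpg" is a placeholder and
# Pillow is assumed to be installed). It runs the full preprocessing pipeline and,
# with the defaults above, yields a batch of shape (1, 3, 224, 224).
if __name__ == "__main__":
    from PIL import Image

    processor = MobileNetV2ImageProcessor()
    batch = processor.preprocess(Image.open("image.jpg"), return_tensors="pt")
    print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])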
| 50 |
"""Strand sort: repeatedly pull an already-sorted "strand" out of the input and
merge it into the running solution until the input is exhausted."""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    # extract one monotone strand from the remaining input
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            # note: popping while enumerating shifts the remaining items, so some
            # are skipped here; the recursive call below picks them up in later strands
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
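
    # an extra randomized check (added; not part of the original tests):
    # strand_sort must agree with the built-in sorted() on arbitrary input
    import random

    data = random.sample(range(100), 20)
    assert strand_sort(list(data)) == sorted(data)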
| 50 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int ):
UpperCAmelCase : list[list[int]] = []
UpperCAmelCase : list[int] = []
UpperCAmelCase : int = 0
UpperCAmelCase : int = sum(UpperCamelCase )
create_state_space_tree(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
return result
def _snake_case ( UpperCamelCase : list[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[int] , UpperCamelCase : list[list[int]] , UpperCamelCase : int , ):
if sum(UpperCamelCase ) > max_sum or (remaining_nums_sum + sum(UpperCamelCase )) < max_sum:
return
if sum(UpperCamelCase ) == max_sum:
result.append(UpperCamelCase )
return
for index in range(UpperCamelCase , len(UpperCamelCase ) ):
create_state_space_tree(
UpperCamelCase , UpperCamelCase , index + 1 , [*path, nums[index]] , UpperCamelCase , remaining_nums_sum - nums[index] , )
A: Tuple = [3, 3_4, 4, 1_2, 5, 2]
A: Optional[int] = 9
A: List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
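
# added sanity check: every reported subset really sums to max_sum
# (for the inputs above the expected output is "[3, 4, 2] [4, 5]")
assert all(sum(subset) == max_sum for subset in result)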
| 359 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}


class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
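
# Worked example (added for clarity): with the default
# attention_types = [[["global", "local"], 12]], expand_attention_types_params
# repeats the pattern ["global", "local"] twelve times, producing a 24-entry list
# that alternates global and local attention, one entry per layer, which is
# exactly what the `len(attention_layers) == num_layers` check above enforces.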
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Return the largest divisor of `seq_length` below `window_size` and the resulting block count."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
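
# Added worked example (values chosen for illustration, given the reconstruction
# above): for seq_length=12 and window_size=4 the candidate divisors are 1..3,
# the divisors of 12 among them are {1, 2, 3}, so the helper returns block
# length 3 and 12 // 3 = 4 blocks.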
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(GPTNeoOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 359 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
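

# quick usage sketch (added for illustration; it assumes the Metric subclass can be
# instantiated directly in this datasets version, and mirrors Example 1 above):
if __name__ == "__main__":
    metric = GoogleBleu()
    hyp = ["the", "cat", "sat", "on", "the", "mat"]
    ref = ["the", "cat", "is", "on", "the", "mat"]
    print(metric.compute(predictions=[hyp], references=[[ref]]))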
| 72 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
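

# small illustrative sketch (added; the values are arbitrary): with one input
# series, two time features and the default 7-entry lags_sequence, the derived
# feature_size is 1 * 7 + (0 + 0 + 2 + 0 + 2) = 11
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(config.feature_size)  # 11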
| 79 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = len(lowercase__ )
lowerCAmelCase_ :str = len(lowercase__ )
lowerCAmelCase_ :List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
lowerCAmelCase_ :Union[str, Any] = True
for i in range(lowercase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowerCAmelCase_ :Union[str, Any] = True
if a[i].islower():
lowerCAmelCase_ :Dict = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 |
"""simple docstring"""
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
def _snake_case ( lowercase__ : int = 1_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = 1
lowerCAmelCase_ :int = 2
for i in range(2 , max_n + 1 ):
lowerCAmelCase_ :Optional[Any] = pre_numerator
lowerCAmelCase_ :int = 2 * i // 3 if i % 3 == 0 else 1
lowerCAmelCase_ :str = cur_numerator
lowerCAmelCase_ :List[Any] = e_cont * pre_numerator + temp
return sum_digits(lowercase__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 256 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
args = parser.parse_args()
pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 325 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None
SCREAMING_SNAKE_CASE_ : torch.FloatTensor = None
SCREAMING_SNAKE_CASE_ : Optional[Tuple[torch.FloatTensor]] = None
SCREAMING_SNAKE_CASE_ : Optional[Tuple[torch.FloatTensor]] = None
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=512 , lowerCAmelCase__="cls" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> Tuple:
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = project_dim
SCREAMING_SNAKE_CASE = pooler_fn
SCREAMING_SNAKE_CASE = learn_encoder
SCREAMING_SNAKE_CASE = use_attention_mask
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [R"""pooler""", R"""logit_scale"""]
SCREAMING_SNAKE_CASE_ : List[Any] = [R"""position_ids""", R"""predictions.decoder.bias"""]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """roberta"""
SCREAMING_SNAKE_CASE_ : Dict = RobertaSeriesConfig
def __init__( self , lowerCAmelCase__ ) -> Union[str, Any]:
super().__init__(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = XLMRobertaModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , 'has_pre_transformation' , lowerCAmelCase__ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __A ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.base_model(
input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=lowerCAmelCase__ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE = outputs['hidden_states'][-2]
SCREAMING_SNAKE_CASE = self.pre_LN(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.transformation_pre(lowerCAmelCase__ )
return TransformationModelOutput(
projection_state=lowerCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=lowerCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 247 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 363 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a Flax checkpoint file (msgpack) into the given PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights (a nested parameter dict) into the given PyTorch model."""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
__lowerCamelCase : str =flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE ) ).values()
if any(SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
__lowerCamelCase : List[str] =jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE )
__lowerCamelCase : int =''''''
__lowerCamelCase : List[str] =flatten_dict(SCREAMING_SNAKE_CASE , sep='''.''' )
__lowerCamelCase : List[str] =pt_model.state_dict()
# keep track of unexpected & missing keys
__lowerCamelCase : Union[str, Any] =[]
__lowerCamelCase : int =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCamelCase : Tuple =flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__lowerCamelCase : Optional[int] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Union[str, Any] =jnp.transpose(SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__lowerCamelCase : Optional[Any] =flax_key_tuple_array[:-1] + ['''weight''']
__lowerCamelCase : Dict =flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__lowerCamelCase : str =flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Union[str, Any] =(
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__lowerCamelCase : Optional[int] ='''.'''.join(SCREAMING_SNAKE_CASE )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__lowerCamelCase : Tuple =np.asarray(SCREAMING_SNAKE_CASE ) if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
__lowerCamelCase : Any =torch.from_numpy(SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
__lowerCamelCase : Optional[Any] =list(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
return pt_model
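
# a minimal usage sketch (added; the checkpoint path and model class are placeholders):
#
#     from transformers import BertConfig, BertModel
#     pt_model = BertModel(BertConfig())
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")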
| 363 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
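

# a minimal usage sketch (added; the hyper-parameters are arbitrary small values):
#
#     enc = SpectrogramNotesEncoder(max_length=64, vocab_size=100, d_model=32,
#                                   dropout_rate=0.1, num_layers=2, num_heads=2,
#                                   d_kv=16, d_ff=64, feed_forward_proj="gated-gelu")
#     tokens = torch.randint(0, 100, (1, 64))
#     encoded, mask = enc(tokens, tokens > 0)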
| 251 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
args = parser.parse_args()

convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
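
# example invocation (added; the script file name and dump path are placeholders,
# while the flags are the ones defined by the parser above):
#
#     python convert_slow_tokenizers_checkpoints_to_fast.py \
#         --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#         --dump_path ./converted-fast-tokenizers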
| 509 | 0 |
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the idealised Casimir equation F = (ℏ * c * pi**2 * A) / (240 * d**4)
    for whichever of force, area or distance is passed as 0, given the other two.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
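
    # illustrative numeric example (added): plates of 4 m^2 held 0.03 m apart
    # experience an attractive Casimir force of roughly 6.42e-21 N
    print(casimir_force(force=0, area=4, distance=0.03))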
| 613 |
"""Project Euler 16: sum of the decimal digits of 2**power."""


def solution(power: int = 1000) -> int:
    num = 2**power
    r = 0
    while num:
        r, num = r + num % 10, num // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
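
    # quick check (added): the worked example from the problem statement,
    # 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26
    assert solution(15) == 26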
| 613 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"
def __init__( self : Optional[int] , __lowerCAmelCase : List[Any]=7_6_8 , __lowerCAmelCase : Optional[int]=1_2 , __lowerCAmelCase : Tuple=1_2 , __lowerCAmelCase : List[Any]=3_0_7_2 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Union[str, Any]=0.0 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Dict=3_8_4 , __lowerCAmelCase : Any=1_6 , __lowerCAmelCase : Optional[int]=3 , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : int=[2, 5, 8, 1_1] , __lowerCAmelCase : Optional[Any]="project" , __lowerCAmelCase : Optional[Any]=[4, 2, 1, 0.5] , __lowerCAmelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : Union[str, Any]=-1 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : List[str]=0.4 , __lowerCAmelCase : List[Any]=2_5_5 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Any=[1, 1_0_2_4, 2_4, 2_4] , __lowerCAmelCase : List[Any]=[0, 1] , __lowerCAmelCase : int=None , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
_lowerCamelCase : Any = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
_lowerCamelCase : Any = BitConfig(**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
_lowerCamelCase : Tuple = BitConfig(**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_lowerCamelCase : str = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
_lowerCamelCase : List[Any] = backbone_featmap_shape
_lowerCamelCase : str = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
_lowerCamelCase : Dict = None
_lowerCamelCase : List[Any] = None
_lowerCamelCase : List[Any] = []
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Any = hidden_act
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : int = attention_probs_dropout_prob
_lowerCamelCase : str = initializer_range
_lowerCamelCase : Dict = layer_norm_eps
_lowerCamelCase : int = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : List[Any] = num_channels
_lowerCamelCase : Dict = qkv_bias
_lowerCamelCase : Dict = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
_lowerCamelCase : List[Any] = readout_type
_lowerCamelCase : Optional[int] = reassemble_factors
_lowerCamelCase : Optional[int] = neck_hidden_sizes
_lowerCamelCase : Optional[Any] = fusion_hidden_size
_lowerCamelCase : Tuple = head_in_index
_lowerCamelCase : Tuple = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
_lowerCamelCase : str = use_auxiliary_head
_lowerCamelCase : Optional[int] = auxiliary_loss_weight
_lowerCamelCase : List[str] = semantic_loss_ignore_index
_lowerCamelCase : Dict = semantic_classifier_dropout
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_lowerCamelCase : Any = self.backbone_config.to_dict()
_lowerCamelCase : str = self.__class__.model_type
return output
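
# Usage sketch (illustrative, not part of the original module): inside the
# transformers package this config can be built directly; `is_hybrid=True`
# makes __init__ synthesize a default BiT backbone config.
#
#     from transformers import DPTConfig, DPTModel
#     config = DPTConfig(is_hybrid=True)
#     model = DPTModel(config)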
| 83 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
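
# Illustrative note (not in the original file): with the `_LazyModule` pattern
# above, importing the package is cheap; torch-backed submodules are only
# imported when one of their attributes is actually accessed, e.g.:
#
#     from transformers import SEWConfig   # no torch import yet
#     from transformers import SEWModel    # triggers the torch-backed module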
| 172 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates a set of `DataLoader`s for the `glue/mrpc` dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
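
# Example invocation (illustrative; the script name is a placeholder, the flags
# come from the argparse setup above):
#
#     accelerate launch this_script.py \
#         --model_name_or_path bert-base-cased \
#         --output_dir ./results \
#         --num_epochs 3 \
#         --performance_lower_bound 0.80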
| 702 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
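
# Why the test above should pass (illustrative sketch, not part of the file):
# for identical beta schedules both schedulers implement the same forward
# process q(x_t | x_0), so add_noise reduces to the same closed form:
#
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
#
# A minimal standalone check, assuming the diffusers API used above:
#
#     alphas_cumprod = ddpm_scheduler.alphas_cumprod
#     t = timesteps[0]
#     manual = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1) * clean_images[0] + \
#              (1 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1) * noise[0]
#     assert torch.allclose(manual, ddpm_scheduler.add_noise(clean_images[0], noise[0], t), atol=1e-5)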
| 648 | 0 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings, increment the count for the
    # first string's character and decrement it for the second string's.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 5 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
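
# Usage sketch (illustrative, not part of the test file): the extractor pads or
# truncates raw audio to 30 s and returns (batch, 80, 3000) log-mel features.
#
#     import numpy as np
#     from transformers import WhisperFeatureExtractor
#
#     fe = WhisperFeatureExtractor()
#     audio = np.random.randn(16000)  # 1 s of fake 16 kHz audio
#     feats = fe(audio, sampling_rate=16000, return_tensors="np").input_features
#     print(feats.shape)  # (1, 80, 3000)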
| 684 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
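
# Example invocation (illustrative; `swag` is one of the tasks registered in
# `utils_multiple_choice.processors`, and the paths are placeholders):
#
#     python run_multiple_choice.py \
#         --task_name swag \
#         --model_name_or_path bert-base-cased \
#         --data_dir ./data/swag \
#         --output_dir ./output \
#         --max_seq_length 128 \
#         --do_train --do_eval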
| 152 |
UpperCamelCase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCamelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 152 | 1 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark
def generate_fn():
snake_case: List[str] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
snake_case: Tuple = df_with_partition_id.select('*' ).where(f"""part_id = {partition_id}""" ).drop('part_id' )
snake_case: Optional[int] = partition_df.collect()
snake_case: int = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir

        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"]
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"]
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"]
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
| 329 |
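
# Usage sketch (illustrative, not from this module): this builder backs the
# public `Dataset.from_spark` entry point.
#
#     from datasets import Dataset
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = Dataset.from_spark(df)  # materializes the DataFrame through the Spark builder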
"""simple docstring"""
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 630 | 0 |
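
# Small demonstration (illustrative): 0 = open cell, 1 = wall. The solver marks
# the discovered path with 1s in the printed solution matrix.
#
#     maze = [
#         [0, 1, 0, 1, 1],
#         [0, 0, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#         [0, 0, 1, 0, 0],
#         [1, 0, 0, 1, 0],
#     ]
#     solve_maze(maze)  # prints the path and returns True if one exists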
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)
    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 715 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
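
# Quick examples (illustrative): ties return every most-frequent value, sorted.
#     mode([2, 3, 4, 5, 3, 4, 2, 5, 2, 4, 4, 9])  -> [4]
#     mode([3, 4, 5, 3, 4, 2, 5, 2, 2, 4, 4])     -> [2, 4]
#     mode([])                                    -> []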
| 89 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
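
# Usage sketch for the helpers above (illustrative, not part of the class
# below; `model` and `input_ids` are assumed to exist):
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)
#
#     forward()  # compiled through tf.function(experimental_compile=True)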
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=False)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
def __UpperCAmelCase ( self , __UpperCamelCase )-> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                # run an additional 5 times to stabilize compilation for tpu and xla
                logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
timeit.repeat(__UpperCamelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
__lowerCAmelCase = timeit.repeat(
__UpperCamelCase , repeat=self.args.repeat , number=1_0 , )
return min(__UpperCamelCase ) / 1_0.0
except ResourceExhaustedError as e:
            self.print_fn(F"""Doesn't fit on GPU. {e}""" )
            return "N/A"
def __UpperCAmelCase ( self , __UpperCamelCase )-> [Memory, MemorySummary]:
logger.info(
"Note that TensorFlow allocates more memory than "
"it might need to speed up computation. "
"The memory reported here corresponds to the memory "
"reported by `nvidia-smi`, which can vary depending "
"on total available memory on the GPU that is used." )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
" consumption line by line." )
__lowerCAmelCase = start_memory_tracing("transformers" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
" with `args.memory=False`" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"py3nvml not installed, we won't log GPU memory usage. "
"Install py3nvml (pip install py3nvml) to log information about GPU." )
__lowerCAmelCase = "N/A"
else:
logger.info(
"Measuring total GPU usage on GPU device. Make sure to not have additional processes"
" running on the same GPU." )
# init nvml
nvml.nvmlInit()
func()
__lowerCAmelCase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
__lowerCAmelCase = nvml.nvmlDeviceGetMemoryInfo(__UpperCamelCase )
__lowerCAmelCase = meminfo.used
__lowerCAmelCase = Memory(__UpperCamelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
" TensorFlow." )
__lowerCAmelCase = None
else:
__lowerCAmelCase = measure_peak_memory_cpu(__UpperCamelCase )
__lowerCAmelCase = Memory(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
__lowerCAmelCase = stop_memory_tracing(__UpperCamelCase )
if memory is None:
__lowerCAmelCase = summary.total
else:
__lowerCAmelCase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
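The timing path above leans on a standard `timeit` idiom: a warm-up pass, then several repeats of a fixed call count, keeping the minimum rather than the mean (slow runs usually reflect interference from other processes, not the code under test). A self-contained sketch of the idiom, independent of the benchmark class above:

import timeit

def toy_workload():
    return sum(i * i for i in range(10_000))

# warm-up pass, mirroring the TPU/XLA stabilization step above
timeit.repeat(toy_workload, repeat=1, number=5)

# each entry of `runtimes` is the total wall time for 10 calls
runtimes = timeit.repeat(toy_workload, repeat=5, number=10)
print(f"best per-call time: {min(runtimes) / 10.0:.6f}s")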
| 367 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """Upper-case the message, keep only letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
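A quick roundtrip through the two functions above (key and message are arbitrary examples):

if __name__ == "__main__":
    key = "playfair example"
    encoded = encode("hide the gold in the tree stump", key)
    print(encoded)               # pairs substituted from the 5x5 key table
    print(decode(encoded, key))  # recovers the prepared (upper-cased, X-padded) text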
| 367 | 1 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # chain colliding entries for a slot in a deque, newest entry first
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
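The class above chains colliding entries per slot with a `collections.deque`, newest first via `appendleft`. A tiny standalone illustration of that behavior:

from collections import deque

slot = deque([])
slot.appendleft("first")
slot.appendleft("second")
print(slot)       # deque(['second', 'first']) -- most recent insert sits at the front
print(len(slot))  # 2, the quantity balanced_factor() compares against charge_factor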
| 345 |
from ..utils import DummyObject, requires_backends
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
class _lowerCamelCase (metaclass=lowerCamelCase ):
lowercase__ = ["""flax"""]
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(self , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
@classmethod
def __lowerCamelCase ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
requires_backends(cls , ['flax'] )
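Every placeholder class above funnels construction and its classmethods into `requires_backends`, which fails fast with an informative error when the backend is absent. A simplified, hypothetical re-implementation of that guard (illustrative names only; the real `DummyObject`/`requires_backends` helpers live in the library's utils module):

import importlib.util

def requires_backends_sketch(obj, backends):
    # resolve a readable name whether we were handed a class or an instance
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}")

class FlaxOnlyThing:
    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, ["flax"])

# FlaxOnlyThing() raises ImportError unless the `flax` package is importable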
| 345 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __SCREAMING_SNAKE_CASE( a_ ):
def __init__( self: Optional[int] , *UpperCamelCase: Any , UpperCamelCase: Any=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: List[str] ) -> Union[str, Any]:
super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ )
snake_case__ = eval_examples
snake_case__ = post_process_function
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: int , ) -> Dict[str, float]:
snake_case__ = gen_kwargs.copy()
snake_case__ = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
snake_case__ = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
snake_case__ = gen_kwargs
snake_case__ = self.eval_dataset if eval_dataset is None else eval_dataset
snake_case__ = self.get_eval_dataloader(lowerCAmelCase_ )
snake_case__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
snake_case__ = self.compute_metrics
snake_case__ = None
snake_case__ = time.time()
snake_case__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case__ = eval_loop(
lowerCAmelCase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
snake_case__ = compute_metrics
snake_case__ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
snake_case__ = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
snake_case__ = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case__ = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
else:
snake_case__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowerCAmelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
snake_case__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCAmelCase_ )
return metrics
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: Dict , UpperCamelCase: str , UpperCamelCase: Optional[Any]=None , UpperCamelCase: str = "test" , **UpperCamelCase: List[str] ) -> Dict:
snake_case__ = gen_kwargs.copy()
snake_case__ = self.get_test_dataloader(lowerCAmelCase_ )
# Temporarily disable metric computation, we will do it in the loop here.
snake_case__ = self.compute_metrics
snake_case__ = None
snake_case__ = time.time()
snake_case__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
snake_case__ = eval_loop(
lowerCAmelCase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCAmelCase_ , metric_key_prefix=lowerCAmelCase_ , )
finally:
snake_case__ = compute_metrics
snake_case__ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowerCAmelCase_ , lowerCAmelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
snake_case__ = self.post_process_function(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 'predict' )
snake_case__ = self.compute_metrics(lowerCAmelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
snake_case__ = metrics.pop(lowerCAmelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCAmelCase_ )
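Both methods above normalize metric names with the same key-prefixing loop; it is plain dictionary surgery and easy to check standalone (the metric values below are made up):

metrics = {"exact_match": 81.2, "f1": 88.6, "eval_runtime": 12.3}
metric_key_prefix = "eval"
for key in list(metrics.keys()):
    if not key.startswith(f"{metric_key_prefix}_"):
        metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
print(metrics)  # {'eval_runtime': 12.3, 'eval_exact_match': 81.2, 'eval_f1': 88.6}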
| 328 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
UpperCAmelCase__ = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
UpperCAmelCase__ = {
'''AI-Sweden/gpt-sw3-126m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-350m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-1.6b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-6.7b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-20b''': 2_0_4_8,
}
class a__ ( a_ ):
'''simple docstring'''
A : int = VOCAB_FILES_NAMES
A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : int = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[Dict[str, Any]] = None , **lowerCAmelCase_ : Optional[Any] , ) -> None:
__A= {} if sp_model_kwargs is None else sp_model_kwargs
__A= kwargs.get('name_or_path' )
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' you are testing the model, this can safely be ignored' )
__A= 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__A= '<|endoftext|>' if eos_token is None else eos_token
__A= '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__A= unk_token if pad_token is None else pad_token
__A= eos_token if bos_token is None else bos_token
else:
__A= '<pad>' if pad_token is None else pad_token
__A= '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
__A= do_lower_case
__A= remove_space
__A= keep_accents
__A= vocab_file
__A= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
# Used for whitespace normalization in input texts
        # fmt: off
        __A= {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
        # fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__A= re.compile(
F"""[{"".join(map(lowerCAmelCase_ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]""" )
def __getstate__( self : Optional[int] ) -> Tuple:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__A= {}
__A= spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size ( self ) -> int:
        return len(self.sp_model )
    def preprocess_text ( self , text : str ) -> str:
        text = self.non_printing_characters_re.sub('' , text )
        # Normalize whitespaces
        text = ''.join([char if char not in self.whitespaces else ' ' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('NFC' , text )
        return text
    def _tokenize ( self , text : str , **kwargs ) -> List[str]:
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self , token : str ) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token ( self , index : int ) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization ( out_string : str ) -> str:
        """Returns the input string; no clean-up is performed for this tokenizer."""
        return out_string
    def convert_tokens_to_string ( self , tokens : List[str] ) -> str:
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab ( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast ( self , text : Union[str, List[str]] , return_tensors : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast ( self , token_ids : Union[int, List[int]] ) -> str:
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids ( self , conversation : "Conversation" ) -> List[int]:
        all_responses = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        prompt = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(all_responses ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=prompt )
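The `preprocess_text` step above does two small things before SentencePiece sees the input: exotic whitespace becomes a plain space, and the text is NFC-normalized so composed and decomposed accent sequences tokenize identically. A standalone stdlib demonstration of the NFC part:

import unicodedata

dirty = "cafe\u0301"            # 'e' followed by a combining acute accent
clean = unicodedata.normalize("NFC", dirty)
print(len(dirty), len(clean))   # 5 4 -- NFC folds the pair into a single 'é'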
| 186 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 't5'
UpperCamelCase__ = ['past_key_values']
UpperCamelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=32_128 , d_model=512 , d_kv=64 , d_ff=2_048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
def _A( self ):
lowercase ={
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
lowercase ='''past_encoder_sequence + sequence'''
lowercase ={0: '''batch'''}
lowercase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
lowercase ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
return common_inputs
@property
def _A( self ):
return 13
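The activation-string parsing in the config above is compact; a standalone trace of both branches clarifies it (this mirrors the logic rather than importing the config class):

for feed_forward_proj in ("relu", "gated-gelu"):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":  # backwards-compatibility alias
        dense_act_fn = "gelu_new"
    print(feed_forward_proj, "->", dense_act_fn, "gated" if is_gated_act else "plain")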
| 145 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Optional[Any] = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_UpperCAmelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 145 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 58 |
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , A_ , A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = name
SCREAMING_SNAKE_CASE__ = value
SCREAMING_SNAKE_CASE__ = weight
def __repr__( self ):
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
    def get_value ( self ):
        '''simple docstring'''
        return self.value
    def get_name ( self ):
        '''simple docstring'''
        return self.name
    def get_weight ( self ):
        '''simple docstring'''
        return self.weight
    def value_weight ( self ):
        '''simple docstring'''
        return self.value / self.weight
def build_menu ( name , value , weight ) -> Any:
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy ( items , max_cost , key_func ) -> Dict:
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def __snake_case ( ) -> str:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
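A small usage example of the helpers above (menu names and values are arbitrary): sort by value, then take items greedily while they still fit under the cost cap.

if __name__ == "__main__":
    names = ["Burger", "Pizza", "Coca Cola", "Rice"]
    values = [80, 100, 60, 70]
    weights = [40, 60, 40, 70]
    menu = build_menu(names, values, weights)
    chosen, total_value = greedy(menu, 100, Things.get_value)
    print(chosen, total_value)  # [Things(Pizza, 100, 60), Things(Burger, 80, 40)] 180.0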
| 100 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : List[str] = 'mgp-str'
def __init__( self : Any , lowercase__ : int=[32, 1_28] , lowercase__ : List[Any]=4 , lowercase__ : List[str]=3 , lowercase__ : str=27 , lowercase__ : Union[str, Any]=38 , lowercase__ : List[str]=5_02_57 , lowercase__ : str=3_05_22 , lowercase__ : Dict=7_68 , lowercase__ : Optional[Any]=12 , lowercase__ : List[Any]=12 , lowercase__ : Tuple=4.0 , lowercase__ : Any=True , lowercase__ : Union[str, Any]=False , lowercase__ : List[Any]=1e-5 , lowercase__ : Any=0.0 , lowercase__ : Any=0.0 , lowercase__ : List[str]=0.0 , lowercase__ : int=False , lowercase__ : Optional[Any]=0.02 , **lowercase__ : Any , ) ->Optional[int]:
"""simple docstring"""
super().__init__(**lowercase__)
_lowercase = image_size
_lowercase = patch_size
_lowercase = num_channels
_lowercase = max_token_length
_lowercase = num_character_labels
_lowercase = num_bpe_labels
_lowercase = num_wordpiece_labels
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = mlp_ratio
_lowercase = distilled
_lowercase = layer_norm_eps
_lowercase = drop_rate
_lowercase = qkv_bias
_lowercase = attn_drop_rate
_lowercase = drop_path_rate
_lowercase = output_aa_attentions
_lowercase = initializer_range
| 572 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
_lowerCamelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
_lowerCamelCase = {
'facebook/bart-base': 1_0_2_4,
'facebook/bart-large': 1_0_2_4,
'facebook/bart-large-mnli': 1_0_2_4,
'facebook/bart-large-cnn': 1_0_2_4,
'facebook/bart-large-xsum': 1_0_2_4,
'yjernite/bart_eli5': 1_0_2_4,
}
class __a ( _snake_case ):
__SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : Optional[Any] = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Union[str, Any] = BartTokenizer
def __init__( self : Tuple , lowercase__ : Optional[int]=None , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[int]=None , lowercase__ : Dict="replace" , lowercase__ : Tuple="<s>" , lowercase__ : Union[str, Any]="</s>" , lowercase__ : Optional[int]="</s>" , lowercase__ : Tuple="<s>" , lowercase__ : str="<unk>" , lowercase__ : Optional[Any]="<pad>" , lowercase__ : Optional[Any]="<mask>" , lowercase__ : Union[str, Any]=False , lowercase__ : str=True , **lowercase__ : Dict , ) ->List[str]:
"""simple docstring"""
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ , **lowercase__ , )
_lowercase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("""add_prefix_space""" , lowercase__) != add_prefix_space:
_lowercase = getattr(lowercase__ , pre_tok_state.pop("""type"""))
_lowercase = add_prefix_space
_lowercase = pre_tok_class(**lowercase__)
_lowercase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_lowercase = """post_processor"""
_lowercase = getattr(self.backend_tokenizer , lowercase__ , lowercase__)
if tokenizer_component_instance:
_lowercase = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowercase = tuple(state["""sep"""])
if "cls" in state:
_lowercase = tuple(state["""cls"""])
_lowercase = False
if state.get("""add_prefix_space""" , lowercase__) != add_prefix_space:
_lowercase = add_prefix_space
_lowercase = True
if state.get("""trim_offsets""" , lowercase__) != trim_offsets:
_lowercase = trim_offsets
_lowercase = True
if changes_to_apply:
_lowercase = getattr(lowercase__ , state.pop("""type"""))
_lowercase = component_class(**lowercase__)
setattr(self.backend_tokenizer , lowercase__ , lowercase__)
@property
def _UpperCAmelCase ( self : List[Any]) ->str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""")
return None
return str(self._mask_token)
@mask_token.setter
def _UpperCAmelCase ( self : Union[str, Any] , lowercase__ : str) ->int:
"""simple docstring"""
_lowercase = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__) if isinstance(lowercase__ , lowercase__) else value
_lowercase = value
def _UpperCAmelCase ( self : Tuple , *lowercase__ : Any , **lowercase__ : Dict) ->BatchEncoding:
"""simple docstring"""
_lowercase = kwargs.get("""is_split_into_words""" , lowercase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""")
return super()._batch_encode_plus(*lowercase__ , **lowercase__)
def _UpperCAmelCase ( self : List[str] , *lowercase__ : Optional[int] , **lowercase__ : List[Any]) ->BatchEncoding:
"""simple docstring"""
_lowercase = kwargs.get("""is_split_into_words""" , lowercase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""")
return super()._encode_plus(*lowercase__ , **lowercase__)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) ->Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None) ->List[int]:
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None) ->List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]
| 572 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__lowerCAmelCase =subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
__lowerCAmelCase =(
subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
__lowerCAmelCase ="|".join(sys.argv[1:])
__lowerCAmelCase =re.compile(Rf"^({joined_dirs}).*?\.py$")
__lowerCAmelCase =[x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 333 |
def lucas_lehmer_test ( p: int ) -> bool:
    """simple docstring"""
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
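The test above decides the primality of the Mersenne number 2^p - 1; it is only meaningful when the exponent p is itself prime. A few known cases as a sanity check:

# 2^3 - 1 = 7 and 2^13 - 1 = 8191 are prime; 2^11 - 1 = 2047 = 23 * 89 is not
print(lucas_lehmer_test(3))    # True
print(lucas_lehmer_test(13))   # True
print(lucas_lehmer_test(11))   # False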
| 333 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : int = (UniPCMultistepScheduler,)
_SCREAMING_SNAKE_CASE : Optional[int] = (('''num_inference_steps''', 25),)
def lowercase__ ( self : Optional[Any] , **__UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = {
'num_train_timesteps': 1000,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**__UpperCAmelCase )
return config
def lowercase__ ( self : str , __UpperCAmelCase : Union[str, Any]=0 , **__UpperCAmelCase : int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop('num_inference_steps' , __UpperCAmelCase )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config(**__UpperCAmelCase )
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals
UpperCamelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase )
UpperCamelCase_ = scheduler_class.from_pretrained(__UpperCAmelCase )
new_scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals
UpperCamelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase_ , UpperCamelCase_ = sample, sample
for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
UpperCamelCase_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
UpperCamelCase_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : Optional[Any] , __UpperCAmelCase : List[str]=0 , **__UpperCAmelCase : int ) -> Any:
"""simple docstring"""
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop('num_inference_steps' , __UpperCAmelCase )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCAmelCase )
UpperCamelCase_ = scheduler_class.from_pretrained(__UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCamelCase_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
UpperCamelCase_ = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowercase__ ( self : List[str] , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : int ) -> Any:
"""simple docstring"""
if scheduler is None:
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(**__UpperCAmelCase )
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(**__UpperCAmelCase )
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
UpperCamelCase_ = 10
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = model(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
return sample
def lowercase__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = dict(self.forward_default_kwargs )
UpperCamelCase_ = kwargs.pop('num_inference_steps' , __UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCAmelCase , 'set_timesteps' ):
scheduler.set_timesteps(__UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , 'set_timesteps' ):
UpperCamelCase_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase_ = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCamelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
UpperCamelCase_ = scheduler.timesteps[5]
UpperCamelCase_ = scheduler.timesteps[6]
UpperCamelCase_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
UpperCamelCase_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
UpperCamelCase_ = UniPCMultistepScheduler(**self.get_scheduler_config() )
UpperCamelCase_ = self.full_loop(scheduler=__UpperCAmelCase )
UpperCamelCase_ = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_464 ) < 1E-3
UpperCamelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCamelCase_ = DEISMultistepScheduler.from_config(scheduler.config )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCamelCase_ = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCamelCase_ = self.full_loop(scheduler=__UpperCAmelCase )
UpperCamelCase_ = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_464 ) < 1E-3
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
UpperCamelCase_ = self.full_loop(
solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , )
assert not torch.isnan(__UpperCAmelCase ).any(), "Samples have nan numbers"
def lowercase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def lowercase__ ( self : Dict ) -> str:
"""simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.full_loop()
UpperCamelCase_ = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2_464 ) < 1E-3
def lowercase__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.full_loop(prediction_type='v_prediction' )
UpperCamelCase_ = torch.mean(torch.abs(__UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1_014 ) < 1E-3
def lowercase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0 )
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
UpperCamelCase_ = 10
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(__UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase_ = model(__UpperCAmelCase , __UpperCAmelCase )
UpperCamelCase_ = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
def lowercase__ ( self : Dict , **__UpperCAmelCase : Tuple ) -> Optional[int]:
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
UpperCamelCase_ = self.get_scheduler_config(**__UpperCAmelCase )
UpperCamelCase_ = scheduler_class(**__UpperCAmelCase )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : Union[str, Any] = logging.get_logger(__name__)
__a : List[Any] = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class A ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : List[Any] = '''falcon'''
_SCREAMING_SNAKE_CASE : Tuple = ['''past_key_values''']
def __init__( self : Dict , __UpperCAmelCase : Union[str, Any]=65024 , __UpperCAmelCase : int=4544 , __UpperCAmelCase : Optional[int]=32 , __UpperCAmelCase : Optional[int]=71 , __UpperCAmelCase : Dict=1E-5 , __UpperCAmelCase : Any=0.02 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : str=True , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Dict=11 , __UpperCAmelCase : Optional[int]=11 , **__UpperCAmelCase : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase_ = kwargs.pop('n_embed' , __UpperCAmelCase )
UpperCamelCase_ = hidden_size if n_embed is None else n_embed
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = layer_norm_epsilon
UpperCamelCase_ = initializer_range
UpperCamelCase_ = use_cache
UpperCamelCase_ = hidden_dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCamelCase_ = alibi
UpperCamelCase_ = new_decoder_architecture
UpperCamelCase_ = multi_query # Ignored when new_decoder_architecture is True
UpperCamelCase_ = parallel_attn
UpperCamelCase_ = bias
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase__ ( self : Any ) -> List[str]:
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return not self.alibi
| 559 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCREAMING_SNAKE_CASE : str = "scheduler_config.json"
class _lowerCamelCase( _a ):
lowercase_ : Any = 1
lowercase_ : Dict = 2
lowercase_ : Union[str, Any] = 3
lowercase_ : Tuple = 4
lowercase_ : Optional[Any] = 5
@dataclass
class _lowerCamelCase( _a ):
lowercase_ : jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """simple docstring"""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path, subfolder=subfolder, return_unused_kwargs=True, **kwargs)
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)
        if hasattr(scheduler, 'create_state') and getattr(scheduler, 'has_state', False):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """simple docstring"""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        """simple docstring"""
        return self._get_compatibles()
    @classmethod
    def _get_compatibles(cls):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
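# Sanity sketch (assumed behavior, not part of the original file): the
# "squaredcos_cap_v2" (Glide cosine) schedule yields betas that grow with t
# and are clipped at max_beta:
#   betas = betas_for_alpha_bar(1000)
#   assert float(betas[0]) < float(betas[-1]) <= 0.999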
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
    @classmethod
    def create(cls, scheduler):
        """simple docstring"""
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}')
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
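# Sketch of the quantities the helpers above compute (standard DDPM notation):
#   add_noise_common:     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
#   get_velocity_common:  v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
# with the per-timestep coefficients broadcast from the left to the sample shape.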
| 89 | 1 |
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
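# Example (assumed usage): each pass bubbles the largest remaining element to the
# end, and the recursion stops early once a pass makes no swaps.
# bubble_sort([4, 1, 3, 2])  # -> [1, 2, 3, 4]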
| 721 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead.")
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    """simple docstring"""
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.")
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration.")
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration.")
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
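# Hypothetical usage sketch (the checkpoint name is illustrative only):
# processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# Resolution order implemented above: the "image_processor_type" key in the
# preprocessor config, then a legacy "feature_extractor_type" key, then the model
# config, and finally IMAGE_PROCESSOR_MAPPING keyed by the config class.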
| 517 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "van"
    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
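# Minimal sanity sketch (assumed): the four stage lists line up one-to-one.
# cfg = VanConfig()
# assert len(cfg.patch_sizes) == len(cfg.strides) == len(cfg.hidden_sizes) == len(cfg.depths) == 4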
| 369 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('input_ids', {0: 'batch', 1: 'sequence'}),
            ] )
    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_images = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_images, return_tensors=framework))
        return inputs
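# Note (sketch): ImageGPT models a 32x32 image as a sequence of 32 * 32 = 1024
# color-cluster tokens plus one start-of-sequence token, which is why the defaults
# above are vocab_size = 512 + 1 and n_positions = 32 * 32.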
| 472 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->', end_prompt='<!--End of the generated tip-->', )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                ' to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
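# Typical invocations (assumed from the argparse setup above):
#   python utils/check_task_guides.py                      # check only, raise on mismatch
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the model lists in place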
| 709 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """simple docstring"""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """simple docstring"""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """simple docstring"""
        features: List[InputFeatures]
        def __init__(self, data_dir, tokenizer, task, max_seq_length=None, overwrite_cache=False, evaluate=False):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir, 'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train', tokenizer.__class__.__name__, str(max_seq_length), task, ), )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info('Training examples: %s', len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info('Saving features into cached file %s', cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """simple docstring"""
        features: List[InputFeatures]
        def __init__(self, data_dir, tokenizer, task, max_seq_length=128, overwrite_cache=False, evaluate=False):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                    if ex_index % 10000 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            # dtype note: `tf.intaa` in the source was unrecoverable; int32 ids / int64 labels assumed.
            self.dataset = tf.data.Dataset.from_generator(
                gen, (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ), (
                    {
                        'example_id': tf.TensorShape([]),
                        'input_ids': tf.TensorShape([None, None]),
                        'attention_mask': tf.TensorShape([None, None]),
                        'token_type_ids': tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ), )
        def get_dataset(self):
            return self.dataset
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """simple docstring"""
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')
    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')
    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex') else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10000 == 0:
            logger.info('Writing example %d' % (ex_index))
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding='max_length', truncation=True, return_overflowing_tokens=True, )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info('*** Example ***')
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
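# Hypothetical usage sketch (paths and tokenizer are illustrative only):
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset("path/to/hans", tokenizer, task="hans", max_seq_length=128)
# len(dataset), dataset.get_labels()  # -> size, ["contradiction", "entailment", "neutral"]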
| 618 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            input_text = input_file.read()
            match = regexp.search(input_text)
            return match
    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding='utf-8') as input_file:
            regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
            return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')
    def test_no_print_statements(self):
        dataset_paths = Path('./datasets')
        dataset_files = list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
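# Note on the first regex (a sketch of its intent): it flags `open(...)` calls that
# pass neither a binary/write mode nor an `encoding=` keyword, so `open(path)` matches
# while `open(path, encoding="utf-8")` and `open(path, "rb")` do not.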
| 467 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)
            else:
                raise ValueError('Target return tensor type could not be returned')
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)
    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)
    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
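# Hypothetical usage sketch (the checkpoint name is illustrative only):
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
# Nested text queries are padded to the max number of queries per image, as implemented above.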
| 467 | 1 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    '''simple docstring'''
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'}, )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0, metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        }, )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum):
    '''simple docstring'''
    train = 'train'
    dev = 'dev'
class SquadDataset(Dataset):
    '''simple docstring'''
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, is_language_sensitive=False, cache_dir=None, dataset_format="pt", ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset', None)
                self.examples = self.old_features.get('examples', None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        ' future run')
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i):
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
            if self.args.version_2_with_negative:
                inputs.update({'is_impossible': is_impossible})
            if self.is_language_sensitive:
                inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions})
        return inputs
 | 703 |
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
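    # Worked example (derived from the merge table in setUp, an illustration only):
    # "lower" -> l o w e r</w> -> "l o"->"lo", "lo w"->"low", "e r</w>"->"er</w>"
    # giving ["low", "er</w>"]; ids follow the vocab order (low=14, er</w>=15, <unk>=20).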
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
'''simple docstring'''
    pass
| 101 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """simple docstring"""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """simple docstring"""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """simple docstring"""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """simple docstring"""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
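# Example (assumed): a 5x5 board admits an open knight's tour, so open_knight_tour(5)
# returns a board whose entries are the visit order 1..25; sizes such as 3 or 4,
# which admit no tour, raise ValueError instead.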
| 105 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        '''simple docstring'''
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        '''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class lowercase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Tuple = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__lowerCAmelCase : str = (AutoformerForPrediction,) if is_torch_available() else ()
__lowerCAmelCase : Dict = {"""feature-extraction""": AutoformerModel} if is_torch_available() else {}
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Any = False
__lowerCAmelCase : List[Any] = False
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Optional[int] = False
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = AutoformerModelTester(self )
UpperCamelCase : int = ConfigTester(self , config_class=_A , has_text_modality=_A )
def _a ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
UpperCamelCase : Optional[int] = model_class(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
UpperCamelCase , UpperCamelCase : Optional[Any] = model_class.from_pretrained(_A , output_loading_info=_A )
self.assertEqual(info["""missing_keys"""] , [] )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_A )
@unittest.skip(reason="""Model has no token embeddings""" )
def _a ( self ):
'''simple docstring'''
pass
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Any = inspect.signature(getattr(_A , """forward""" ) )
# The main input is the name of the argument after `self`
UpperCamelCase : Optional[Any] = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , _A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase : Dict = model_class(_A )
UpperCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase : str = [*signature.parameters.keys()]
UpperCamelCase : Optional[Any] = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(_A )] , _A )
def _a ( self ):
'''simple docstring'''
UpperCamelCase , UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase : Dict = True
UpperCamelCase : Tuple = getattr(self.model_tester , """seq_length""" , _A )
UpperCamelCase : Dict = getattr(self.model_tester , """decoder_seq_length""" , _A )
UpperCamelCase : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , _A )
UpperCamelCase : Optional[int] = getattr(self.model_tester , """d_model""" , _A )
UpperCamelCase : Optional[Any] = getattr(self.model_tester , """num_attention_heads""" , _A )
UpperCamelCase : Tuple = d_model // num_attention_heads
for model_class in self.all_model_classes:
UpperCamelCase : List[Any] = True
UpperCamelCase : Any = False
UpperCamelCase : List[str] = True
UpperCamelCase : List[str] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase : Tuple = model(**self._prepare_for_class(_A , _A ) )
UpperCamelCase : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase : Tuple = True
UpperCamelCase : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase : List[Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCamelCase : List[str] = outputs.encoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
UpperCamelCase : List[str] = len(_A )
UpperCamelCase : List[str] = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(_A , _A )
# decoder attentions
UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
UpperCamelCase : int = outputs.cross_attentions
self.assertIsInstance(_A , (list, tuple) )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
UpperCamelCase : Any = True
UpperCamelCase : Dict = True
UpperCamelCase : int = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase : int = model(**self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + 2 , len(_A ) )
UpperCamelCase : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _a ( self ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def UpperCamelCase (SCREAMING_SNAKE_CASE="train-batch.pt" ):
UpperCamelCase : List[str] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=SCREAMING_SNAKE_CASE , repo_type="""dataset""" )
UpperCamelCase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_A )
UpperCamelCase : List[str] = prepare_batch()
with torch.no_grad():
UpperCamelCase : Dict = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
UpperCamelCase : Optional[int] = torch.Size(
(6_4, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , _A )
UpperCamelCase : Optional[int] = torch.tensor(
[[0.35_93, -1.33_98, 0.63_30], [0.22_79, 1.53_96, -0.17_92], [0.04_50, 1.32_25, -0.23_35]] , device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_A )
UpperCamelCase : Dict = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
UpperCamelCase : Optional[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
UpperCamelCase : Dict = torch.Size((6_4, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , _A )
UpperCamelCase : Any = torch.tensor(
[[-0.07_34, -0.90_36, 0.83_58], [4.71_86, 2.41_13, 1.95_81], [1.79_53, 2.35_58, 1.29_70]] , device=_A )
self.assertTrue(torch.allclose(output[0, :3, :3] , _A , atol=_A ) )
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_A )
UpperCamelCase : List[Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
UpperCamelCase : Optional[Any] = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
UpperCamelCase : Tuple = torch.Size((6_4, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , _A )
UpperCamelCase : Dict = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=_A )
UpperCamelCase : Optional[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _A , rtol=1e-1 ) )
| 102 | 0 |
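# A minimal standalone sketch (illustrative values, not taken from the
# Autoformer tester above): the usable past window must cover the context
# length plus the largest lag, since every lagged copy of the series
# reaches that many extra steps into the past.
def lagged_past_length(context_length: int, lags_sequence: list) -> int:
    return context_length + max(lags_sequence)

assert lagged_past_length(24, [1, 2, 7]) == 31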
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__lowercase :Any = KandinskyVaaPipeline
__lowercase :int = [
"image_embeds",
"negative_image_embeds",
]
__lowercase :str = ["image_embeds", "negative_image_embeds"]
__lowercase :List[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowercase :Any = False
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = {
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCamelCase_ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=UpperCamelCase__ , )
lowerCamelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCamelCase_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowerCamelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCamelCase_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = '''cpu'''
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.6_237_976, 1.0, 0.36_441_332, 1.0, 0.70_639_634, 0.29_877_186, 0.85_652_125, 0.5_216_843, 0.54_454_046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
lowerCamelCase_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
lowerCamelCase_ = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
lowerCamelCase_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase_ = '''red cat, 4k photo'''
lowerCamelCase_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCamelCase_ , lowerCamelCase_ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCamelCase_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
lowerCamelCase_ = pipeline(
image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , output_type='''np''' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 66 |
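# A standalone sketch of the device-aware seeding pattern used in the
# dummy-input helper above (function name here is illustrative): the mps
# branch falls back to the global seed rather than a per-device generator.
import torch

def make_generator(device, seed: int = 0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)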
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class lowerCAmelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
def __init__( self , UpperCamelCase__=None , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
super().__init__(features=UpperCamelCase__ )
lowerCamelCase_ = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
import torch
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and column:
if all(
isinstance(UpperCamelCase__ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase__ )
return column
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
import torch
if isinstance(UpperCamelCase__ , (str, bytes, type(UpperCamelCase__ )) ):
return value
elif isinstance(UpperCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCamelCase_ = {}
if isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
lowerCamelCase_ = {'''dtype''': torch.intaa}
elif isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
lowerCamelCase_ = {'''dtype''': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
lowerCamelCase_ = np.asarray(UpperCamelCase__ )
return torch.tensor(UpperCamelCase__ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase__ , '''__array__''' ) and not isinstance(UpperCamelCase__ , torch.Tensor ):
lowerCamelCase_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase__ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase__ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase__ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return map_nested(self._recursive_tensorize , UpperCamelCase__ , map_list=UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Mapping:
'''simple docstring'''
lowerCamelCase_ = self.numpy_arrow_extractor().extract_row(UpperCamelCase__ )
lowerCamelCase_ = self.python_features_decoder.decode_row(UpperCamelCase__ )
return self.recursive_tensorize(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> "torch.Tensor":
'''simple docstring'''
lowerCamelCase_ = self.numpy_arrow_extractor().extract_column(UpperCamelCase__ )
lowerCamelCase_ = self.python_features_decoder.decode_column(UpperCamelCase__ , pa_table.column_names[0] )
lowerCamelCase_ = self.recursive_tensorize(UpperCamelCase__ )
lowerCamelCase_ = self._consolidate(UpperCamelCase__ )
return column
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Mapping:
'''simple docstring'''
lowerCamelCase_ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase__ )
lowerCamelCase_ = self.python_features_decoder.decode_batch(UpperCamelCase__ )
lowerCamelCase_ = self.recursive_tensorize(UpperCamelCase__ )
for column_name in batch:
lowerCamelCase_ = self._consolidate(batch[column_name] )
return batch
| 66 | 1 |
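# A standalone sketch of the dtype defaulting performed by the tensorize
# method above (function name here is illustrative; the concrete dtypes are
# assumed to be torch.int64 and torch.float32): integer numpy input and
# floating numpy input each get a default unless the caller overrides it
# via tensor kwargs.
import numpy as np
import torch

def default_torch_dtype(value: np.ndarray) -> dict:
    if np.issubdtype(value.dtype, np.integer):
        return {"dtype": torch.int64}
    if np.issubdtype(value.dtype, np.floating):
        return {"dtype": torch.float32}
    return {}

assert default_torch_dtype(np.array([1, 2])) == {"dtype": torch.int64}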
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=56 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Any=7 , SCREAMING_SNAKE_CASE__ : str="gelu_new" , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=5_12 , SCREAMING_SNAKE_CASE__ : str=16 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : Tuple=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : List[Any]="block_sparse" , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , ) -> Union[str, Any]:
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_attention_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_choices
__lowerCamelCase = rescale_embeddings
__lowerCamelCase = attention_type
__lowerCamelCase = use_bias
__lowerCamelCase = block_size
__lowerCamelCase = num_random_blocks
def __A ( self : List[str] ) -> List[str]:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_attention_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def __A ( self : List[str] ) -> Any:
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = config_and_inputs
__lowerCamelCase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCAmelCase__ ( _A , unittest.TestCase ):
a__ : Dict = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
a__ : Tuple = False
a__ : Union[str, Any] = False
def __A ( self : Union[str, Any] ) -> List[Any]:
__lowerCamelCase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __A ( self : Optional[Any] ) -> List[Any]:
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __A ( self : Tuple ) -> Dict:
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __A ( self : str ) -> Optional[int]:
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __A ( self : Any ) -> Optional[Any]:
super().test_hidden_states_output()
@slow
def __A ( self : List[str] ) -> Optional[int]:
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('''google/bigbird-roberta-base''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> int:
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def __A ( self : Any ) -> int:
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = model_class(SCREAMING_SNAKE_CASE__ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Tuple ):
return model(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
with self.subTest('''JIT Enabled''' ):
__lowerCamelCase = model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__lowerCamelCase = model_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1e-5 , SCREAMING_SNAKE_CASE__ : List[Any]="outputs" , SCREAMING_SNAKE_CASE__ : Tuple=None ) -> Union[str, Any]:
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the PyTorch
# version makes an effort to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 298 |
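# A minimal standalone sketch of the jit-vs-eager equivalence check in the
# test above (the function below is illustrative, not a model):
import jax
import jax.numpy as jnp

def f(x):
    return jnp.tanh(x) * 2.0

jitted_f = jax.jit(f)
x = jnp.ones((2, 3))
assert jitted_f(x).shape == f(x).shape
with jax.disable_jit():
    assert f(x).shape == (2, 3)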
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _snake_case ( _A , _A , _A , unittest.TestCase ):
_A = StableUnCLIPPipeline
_A = TEXT_TO_IMAGE_PARAMS
_A = TEXT_TO_IMAGE_BATCH_PARAMS
_A = TEXT_TO_IMAGE_IMAGE_PARAMS
_A = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_A = False
def lowerCAmelCase_ ( self ) -> Any:
snake_case__ :Union[str, Any] = 32
snake_case__ :List[str] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
snake_case__ :int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
snake_case__ :Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=UpperCamelCase ,projection_dim=UpperCamelCase ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) )
torch.manual_seed(0 )
snake_case__ :str = PriorTransformer(
num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=UpperCamelCase ,num_layers=1 ,)
torch.manual_seed(0 )
snake_case__ :Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" ,prediction_type="sample" ,num_train_timesteps=1_000 ,clip_sample=UpperCamelCase ,clip_sample_range=5.0 ,beta_schedule="squaredcos_cap_v2" ,)
# regular denoising components
torch.manual_seed(0 )
snake_case__ :Any = StableUnCLIPImageNormalizer(embedding_dim=UpperCamelCase )
snake_case__ :Optional[int] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
snake_case__ :Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
snake_case__ :Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=UpperCamelCase ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) )
torch.manual_seed(0 )
snake_case__ :List[Any] = UNetaDConditionModel(
sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type="projection" ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=UpperCamelCase ,layers_per_block=1 ,upcast_attention=UpperCamelCase ,use_linear_projection=UpperCamelCase ,)
torch.manual_seed(0 )
snake_case__ :Optional[int] = DDIMScheduler(
beta_schedule="scaled_linear" ,beta_start=0.00085 ,beta_end=0.012 ,prediction_type="v_prediction" ,set_alpha_to_one=UpperCamelCase ,steps_offset=1 ,)
torch.manual_seed(0 )
snake_case__ :int = AutoencoderKL()
snake_case__ :Any = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=0 ) -> Optional[Any]:
if str(UpperCamelCase ).startswith("mps" ):
snake_case__ :Optional[int] = torch.manual_seed(UpperCamelCase )
else:
snake_case__ :int = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase )
snake_case__ :int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase_ ( self ) -> int:
snake_case__ :Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=UpperCamelCase )
def lowerCAmelCase_ ( self ) -> str:
snake_case__ :List[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCamelCase )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase_ ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self ) -> Optional[Any]:
snake_case__ :Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
snake_case__ :Dict = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" ,torch_dtype=torch.floataa )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ :Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
snake_case__ :List[str] = pipe("anime turtle" ,generator=UpperCamelCase ,output_type="np" )
snake_case__ :Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase ,UpperCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ :Optional[int] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" ,torch_dtype=torch.floataa )
snake_case__ :Optional[int] = pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
snake_case__ :Union[str, Any] = pipe(
"anime turtle" ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type="np" ,)
snake_case__ :int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 241 | 0 |
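# A standalone sketch of the peak-memory measurement pattern in the test
# above (helper name is illustrative; calling it requires a CUDA device):
import torch

def peak_cuda_memory_bytes(fn) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    return torch.cuda.max_memory_allocated()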
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class SCREAMING_SNAKE_CASE( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Any ) -> Any:
super().__init__()
UpperCAmelCase : Optional[int] = model
UpperCAmelCase : List[str] = 2
UpperCAmelCase : Tuple = nn.Linear(self.model.config.hidden_size , self.num_labels )
def A ( self : List[str] ) -> Tuple:
pass
def snake_case_ ( _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : List[Any] = LongformerModel.from_pretrained(_lowerCAmelCase )
UpperCAmelCase : Optional[int] = LightningModel(_lowerCAmelCase )
UpperCAmelCase : Union[str, Any] = torch.load(_lowerCAmelCase , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
UpperCAmelCase : Dict = LongformerForQuestionAnswering.from_pretrained(_lowerCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_lowerCAmelCase )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
UpperCamelCase__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCamelCase__: Dict = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 713 |
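# Hedged usage example for the conversion script above. The script filename
# and all paths below are placeholders, not taken from the original source;
# only the flag names come from the argparse definition above.
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa-converted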
'''simple docstring'''
UpperCamelCase__: dict[tuple[int, int, int], int] = {}
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCAmelCase : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCAmelCase : int = _calculate(days - 1 , _lowerCAmelCase , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCAmelCase : Optional[Any] = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCAmelCase : Tuple = _calculate(days - 1 , _lowerCAmelCase , 0 )
UpperCAmelCase : str = state_late + state_absent + state_ontime
UpperCAmelCase : List[Any] = prizestrings
return prizestrings
def snake_case_ ( _lowerCAmelCase : int = 30 ) -> int:
return _calculate(_lowerCAmelCase , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 528 | 0 |
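# A brute-force cross-check sketch (not part of the original solution):
# enumerate every on-time/late/absent string directly and keep those with
# at most one 'A' and never three consecutive 'L's, matching the failure
# conditions of `_calculate` above.
from itertools import product

def brute_force_prize_strings(days: int) -> int:
    total = 0
    for attendance in product("OLA", repeat=days):
        s = "".join(attendance)
        if s.count("A") < 2 and "LLL" not in s:
            total += 1
    return total

# brute_force_prize_strings(4) should equal _calculate(4, 0, 0)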
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a :
"""simple docstring"""
def __init__( self: List[str] , __lowerCamelCase: List[Any] , __lowerCamelCase: Tuple=13 , __lowerCamelCase: Optional[Any]=32 , __lowerCamelCase: Tuple=2 , __lowerCamelCase: Tuple=3 , __lowerCamelCase: List[str]=16 , __lowerCamelCase: Tuple=[32, 64, 128] , __lowerCamelCase: int=[1, 2, 1] , __lowerCamelCase: Union[str, Any]=[2, 2, 4] , __lowerCamelCase: Union[str, Any]=2 , __lowerCamelCase: Union[str, Any]=2.0 , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: str=0.0 , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: int=0.1 , __lowerCamelCase: Union[str, Any]="gelu" , __lowerCamelCase: Any=False , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: List[Any]=0.02 , __lowerCamelCase: List[Any]=1e-5 , __lowerCamelCase: Optional[int]=True , __lowerCamelCase: Dict=None , __lowerCamelCase: List[Any]=True , __lowerCamelCase: List[str]=10 , __lowerCamelCase: int=8 , __lowerCamelCase: Any=["stage1", "stage2"] , __lowerCamelCase: int=[1, 2] , ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = parent
UpperCamelCase__: Optional[Any] = batch_size
UpperCamelCase__: List[str] = image_size
UpperCamelCase__: List[Any] = patch_size
UpperCamelCase__: int = num_channels
UpperCamelCase__: int = embed_dim
UpperCamelCase__: Optional[int] = hidden_sizes
UpperCamelCase__: Optional[int] = depths
UpperCamelCase__: str = num_heads
UpperCamelCase__: Tuple = window_size
UpperCamelCase__: int = mlp_ratio
UpperCamelCase__: Optional[Any] = qkv_bias
UpperCamelCase__: Union[str, Any] = hidden_dropout_prob
UpperCamelCase__: Optional[int] = attention_probs_dropout_prob
UpperCamelCase__: Tuple = drop_path_rate
UpperCamelCase__: Dict = hidden_act
UpperCamelCase__: Tuple = use_absolute_embeddings
UpperCamelCase__: List[Any] = patch_norm
UpperCamelCase__: List[str] = layer_norm_eps
UpperCamelCase__: int = initializer_range
UpperCamelCase__: Any = is_training
UpperCamelCase__: Union[str, Any] = scope
UpperCamelCase__: str = use_labels
UpperCamelCase__: List[Any] = type_sequence_label_size
UpperCamelCase__: int = encoder_stride
UpperCamelCase__: str = out_features
UpperCamelCase__: int = out_indices
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__: Optional[Any] = None
if self.use_labels:
UpperCamelCase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__: List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self: List[str] ):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase_ ( self: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: Dict ):
'''simple docstring'''
UpperCamelCase__: Tuple = FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: Any = model(UpperCamelCase__ )
UpperCamelCase__: int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCamelCase__: Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] ):
'''simple docstring'''
UpperCamelCase__: str = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: List[str] = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
UpperCamelCase__: Any = None
UpperCamelCase__: Any = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: str = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase_ ( self: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCamelCase__: List[str] = 1
UpperCamelCase__: Union[str, Any] = FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__: Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: List[str] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: int ):
'''simple docstring'''
UpperCamelCase__: Any = self.type_sequence_label_size
UpperCamelCase__: Dict = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__: int = 1
UpperCamelCase__: List[Any] = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase__: Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__: int = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: Optional[int] = config_and_inputs
UpperCamelCase__: Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( _snake_case , _snake_case , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{"""feature-extraction""": FocalNetModel, """image-classification""": FocalNetForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: str = FocalNetModelTester(self )
UpperCamelCase__: str = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 , has_text_modality=UpperCamelCase__ )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
UpperCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def UpperCAmelCase_ ( self: Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__: List[str] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase__: Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__: Optional[Any] = model_class(UpperCamelCase__ )
UpperCamelCase__: Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__: Optional[Any] = [*signature.parameters.keys()]
UpperCamelCase__: Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase_ ( self: Optional[Any] , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: Tuple , __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase__: Tuple = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase__: List[str] = outputs.hidden_states
UpperCamelCase__: Optional[int] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# FocalNet has a different seq_length
UpperCamelCase__: Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__: Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCamelCase__: List[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: List[str] = reshaped_hidden_states[0].shape
UpperCamelCase__: List[str] = (
reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__: Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__: int = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__: Dict = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__: Optional[Any] = 3
UpperCamelCase__: List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCamelCase__: Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCamelCase__: List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCamelCase__: str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
UpperCamelCase__: Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__: Optional[Any] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@slow
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__: Dict = FocalNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__: Optional[int] = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
UpperCamelCase__: str = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCamelCase__ )
UpperCamelCase__: List[str] = self.default_image_processor
UpperCamelCase__: int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase__: Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase__: Union[str, Any] = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase__: Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase__: Dict = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class _a ( _snake_case , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
UpperCamelCase__ = FocalNetConfig
UpperCamelCase__ = False
def UpperCAmelCase_ ( self: List[Any] ):
'''simple docstring'''
UpperCamelCase__: List[str] = FocalNetModelTester(self )
| 380 |
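# A standalone sketch of the reshape verified in the hidden-states check
# above: a (batch, channels, height, width) feature map flattened to
# (batch, num_patches, channels).
import torch

x = torch.randn(2, 8, 4, 4)
flat = x.view(2, 8, 16).permute(0, 2, 1)
assert flat.shape == (2, 16, 8)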
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__lowerCamelCase = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 288 | 0 |
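The lazy-module plumbing above defers importing heavy submodules until a symbol is actually used. A minimal sketch of the same idea (an illustration using importlib, not the transformers implementation):
# Minimal sketch of lazy submodule resolution via __getattr__ (illustrative only).
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }
    def __getattr__(self, symbol):
        # Import the owning submodule only on first attribute access.
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(module, symbol)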
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A ( HashTable ):
    '''simple docstring'''
    def __init__(self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
    def _set_value(self , key , data ):
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor(self ):
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution(self , key , data=None ):
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 399 | 0 |
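The class above layers deque-based chaining onto a HashTable base class that is not shown here. A self-contained sketch of plain separate chaining with deques, assuming no base class:
# Self-contained separate-chaining sketch (illustrative; not the HashTable base above).
from collections import deque

class ChainedTable:
    def __init__(self, size=7):
        self.size = size
        self.slots = [None] * size
    def insert(self, key, value):
        index = hash(key) % self.size
        if self.slots[index] is None:
            self.slots[index] = deque()
        self.slots[index].appendleft((key, value))  # newest entry first
    def lookup(self, key):
        index = hash(key) % self.size
        for k, v in self.slots[index] or ():
            if k == key:
                return v
        raise KeyError(key)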
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
    '''simple docstring'''
    features: Optional[datasets.Features] = None
def _generate_iterable_examples( df: "pyspark.sql.DataFrame" , partition_order: List[int] , ):
import pyspark
def generate_fn():
        df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*' ).where(f'part_id = {partition_id}' ).drop('part_id' )
            rows = partition_df.collect()
            row_id = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
'''simple docstring'''
    def __init__( self , df: "pyspark.sql.DataFrame" , partition_order: List[int] = None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self: str ):
yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator: np.random.Generator ):
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id: int , num_workers: int ):
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
@property
    def n_shards( self ):
return len(self.partition_order )
class SparkDatasetBuilder( datasets.DatasetBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df: "pyspark.sql.DataFrame" , cache_dir: str = None , working_dir: str = None , **config_kwargs , ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
    def _validate_cache_dir( self ):
# Returns the path of the created file.
def create_cache_and_write_probe(a: Any ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info( self ):
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager: datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size: int ):
import pyspark
def get_arrow_batch_size(a: List[Any] ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
            .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath: str , file_format: str , max_shard_size: int , ):
        import pyspark
        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
            for file in os.listdir(os.path.dirname(working_fpath ) ):
                dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator: "datasets.SplitGenerator" , file_format: str = "arrow" , max_shard_size: Optional[Union[str, int]] = None , num_proc: Optional[int] = None , **kwargs , ):
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
        fname = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
            fs = self._fs
# use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int , shard_id: int , global_shard_id: int , ):
                rename(
                    fs , fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , F'{global_shard_id:05d}' ).replace('NNNNN' , F'{total_shards:05d}' ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id , num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace(SUFFIX , '' ) , )
    def _get_examples_iterable_for_split( self , split_generator: "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
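This builder is what backs loading a Spark DataFrame into a dataset; a hedged usage sketch (requires a running Spark session; names per the datasets documentation for Dataset.from_spark):
# Hedged usage sketch for the builder above; the column names are illustrative.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([{"id": i, "text": f"row {i}"} for i in range(10)])
ds = Dataset.from_spark(df)  # materializes the DataFrame through the Spark builder
print(ds[0])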
| 669 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input('Enter image url: ').strip()
    print(F"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 669 | 1 |
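Many sites refuse requests without a browser-like User-Agent, and not every page defines an og:image meta tag; a hedged, more defensive variant of the lookup above:
# Defensive variant (assumption: the target site blocks the default requests user agent).
import requests
from bs4 import BeautifulSoup

headers = {"User-Agent": "Mozilla/5.0"}
resp = requests.get("https://example.com", headers=headers, timeout=10)
soup = BeautifulSoup(resp.content, "html.parser")
meta = soup.find("meta", {"property": "og:image"})
print(meta["content"] if meta else "no og:image tag found")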
'''simple docstring'''
def logical_left_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift( number: int , shift_amount: int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift( number: int , shift_amount: int ) -> str:
    if number >= 0: # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else: # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] ) # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 |
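A few hand-checked calls to the shift helpers above:
# bin(17) == '0b10001', so appending two zeros gives '0b1000100'.
assert logical_left_shift(17, 2) == "0b1000100"
# 1983 is 0b11110111111; dropping the low 4 bits leaves '0b1111011'.
assert logical_right_shift(1983, 4) == "0b1111011"
# -17 in 6-bit two's complement is 101111; the sign bit is replicated while shifting.
assert arithmetic_right_shift(-17, 2) == "0b111011"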
'''simple docstring'''
def triangle_number_generator():
    for n in range(1 , 1_00_00_00 ):
        yield n * (n + 1) // 2
def count_divisors( n ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_00 )
if __name__ == "__main__":
print(solution())
| 136 | 1 |
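A quick worked check of count_divisors: 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors, and it is the first triangle number with more than five divisors.
assert count_divisors(28) == 6
assert next(i for i in triangle_number_generator() if count_divisors(i) > 5) == 28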
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
a ="\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
a ="\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
a ="\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer( text ):
    def remove_articles(text ):
        regex = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
        return re.sub(regex , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact( a_gold , a_pred ):
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em( predictions , references ):
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 1_0_0
def SARIngram( sgrams , cgrams , rgramslist , numref ):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent( ssent , csent , rsents ):
    numref = len(rsents )
    s1grams = ssent.split(' ' )
    c1grams = csent.split(' ' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(' ' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ' ' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2] + ' ' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ' ' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2] + ' ' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ' ' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2] + ' ' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score, del1score, add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score, del2score, add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score, del3score, add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score, del4score, add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize( sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari( sources , predictions , references ):
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('Sources length must match predictions and references lengths.' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 1_0_0 * sari_score
def compute_sacrebleu( predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('Sacrebleu requires the same number of references for each prediction' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence'),
'references': datasets.Sequence(datasets.Value('string' ,id='sequence') ,id='references'),
}) ,codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] ,reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] ,)
    def _compute(self , sources , predictions , references ):
        result = {}
        result.update({'sari': compute_sari(sources=sources , predictions=predictions , references=references )})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions , references=references )})
        result.update({'exact': compute_em(predictions=predictions , references=references )})
        return result
return result
| 652 |
from maths.prime_factors import prime_factors
def liouville_lambda( number ) -> int:
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 | 0 |
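The function above is the Liouville lambda: +1 when n has an even number of prime factors counted with multiplicity, -1 otherwise. A dependency-free check of the same definition:
# Counts prime factors with multiplicity, avoiding the maths.prime_factors import above.
def omega(n: int) -> int:
    count, p = 0, 2
    while p * p <= n:
        while n % p == 0:
            n //= p
            count += 1
        p += 1
    return count + (1 if n > 1 else 0)

assert (-1) ** omega(12) == -1  # 12 = 2 * 2 * 3, three factors
assert (-1) ** omega(36) == 1   # 36 = 2 * 2 * 3 * 3, four factors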
"""simple docstring"""
from __future__ import annotations
def comp_and_swap( array , index1 , index2 , direction ) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1] , array[index2] = array[index2], array[index1]
def bitonic_merge( array , low , length , direction ) -> None:
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array , low , length , direction ) -> None:
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 533 |
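Note that bitonic sort assumes the slice length is a power of two; a quick check of the functions above:
data = [12, 42, -21, 17, 23, 18, 9, -5]  # length 8, a power of two
bitonic_sort(data, 0, len(data), 1)      # direction 1 = ascending
assert data == [-21, -5, 9, 12, 17, 18, 23, 42]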
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError ):
    '''simple docstring'''
    pass
def gen( shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"] )
    world_size = int(os.environ["WORLD_SIZE"] )
    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool )
    parser.add_argument("--local_rank" , type=int )
    parser.add_argument("--num_workers" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [F'shard_{shard_idx}' for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F'local_size {local_size} != expected_local_size {expected_local_size}' )
if __name__ == "__main__":
main()
| 533 | 1 |
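What the test above exercises, in miniature: each rank receives a disjoint, near-equal share, with the remainder going to the lowest ranks (a sketch; names per the datasets documentation):
from datasets import Dataset
from datasets.distributed import split_dataset_by_node

full = Dataset.from_dict({"i": list(range(10))})
shard0 = split_dataset_by_node(full, rank=0, world_size=3)
shard1 = split_dataset_by_node(full, rank=1, world_size=3)
print(len(shard0), len(shard1))  # 4 3, matching expected_local_size above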
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_table_transformer"""] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 512 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase__ ( BaseOutput ):
    '''simple docstring'''
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 512 | 1 |
from ..utils import DummyObject, requires_backends
class _A ( metaclass=DummyObject ):
    _backends = ['''torch''', '''scipy''']
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ['''torch''', '''scipy'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ['''torch''', '''scipy'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
"""simple docstring"""
requires_backends(cls , ['''torch''', '''scipy'''] ) | 596 |
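The placeholder class above turns any use of a torch/scipy-dependent object into a clear "missing backend" error instead of an ImportError deep in the stack. A minimal sketch of the metaclass trick (illustrative, not the transformers implementation):
class DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the torch and scipy backends")

class MyDummy(metaclass=DummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the torch and scipy backends")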
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs( graph ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
lowerCAmelCase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 596 | 1 |
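For contrast with the even cycle above, a graph containing an odd cycle cannot be two-colored:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False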
import os
import numpy
import onnx
def _is_equal_tensor_proto( a , b ):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    # Compare the protos with the names stripped, then restore the names.
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
| 105 |
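The numeric codes in the branch above come from the ONNX TensorProto enum (1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE); the byte widths can be written without magic numbers:
import onnx

BYTES_PER_ELEMENT = {
    onnx.TensorProto.FLOAT: 4,
    onnx.TensorProto.INT32: 4,
    onnx.TensorProto.INT64: 8,
    onnx.TensorProto.DOUBLE: 8,
}
print(BYTES_PER_ELEMENT[onnx.TensorProto.INT64])  # 8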
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 371 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = BertConfig.from_json_file(bert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowercase :str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowercase :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 708 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__lowercase :Tuple = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__lowercase :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__lowercase :List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions: List[List[List[str]]] , references: List[List[str]] , min_len: int = 1 , max_len: int = 4 , ) ->Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
} | 26 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_van'''] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 572 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
lowercase_ : Any = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = """timesformer"""
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 572 | 1 |
import numpy as np
def power_iteration (input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ):
    '''simple docstring'''
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration ():
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1J * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 704 |
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with blocks of
    length at least three, any two blocks separated by at least one unit gap."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
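# Sanity check from the Project Euler 114 statement (assumed to be the problem
# this solves, given the minimum block length of three): a row measuring seven
# units admits exactly seventeen fillings.
#
#   assert solution(7) == 17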
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    """Build one dummy input per requested type ("text", "image" or "audio")."""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs: List):
    """Map each produced output back to its agent type name."""
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
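# Hedged usage sketch: a concrete test case mixes this class into a
# unittest.TestCase and provides `self.tool` in setUp (tool name hypothetical):
#
#   class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
#       def setUp(self):
#           self.tool = load_tool("translation")
#           self.tool.setup()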
| 78 |
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
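# Worked example of the iterative branch above, using the docstring inputs:
# "this is the prediction" vs "this is the reference" scores 1 substitution
# over 4 reference words; "there is an other sample" vs "there is another one"
# scores 2 substitutions + 1 insertion over 4 reference words (assuming jiwer's
# usual alignment). WER = (1 + 3) / (4 + 4) = 0.5, matching the example output.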
| 183 | 0 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the training test below has predictable timing."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # else the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
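# Both tests above follow the same recipe: read the example bash script,
# substitute test-sized hyper-parameters into it, then impersonate the command
# line via `patch.object(sys, "argv", testargs)` so the real argparse stack and
# training entry point run exactly as they would from a shell.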
| 718 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
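    # Note on the memory assertion above: `enable_sequential_cpu_offload` keeps
    # only one submodule on the GPU at a time, which is what lets the 512x2048
    # panorama fit under the asserted ~5.5 GB peak-allocation budget.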
| 324 | 0 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(stop: int = 1_000_000) -> int:
    """Sum the numbers below `stop` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, stop):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
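# Worked example from the Project Euler 36 statement: 585 is palindromic in
# base 10 and equals 0b1001001001, also a palindrome, so it is counted.
#
#   assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])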
| 486 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
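# Hedged usage sketch (checkpoint from the pretrained map above; requires
# network access to download the SentencePiece model):
#
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   text = tok.convert_tokens_to_string(tok._tokenize("A sentence."))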
| 486 | 1 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    """Seed every random number generator used during training."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
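# Hedged usage sketch of the lifecycle above (`model`, `optimizer` and `loader`
# are hypothetical):
#
#   ema = EMAModel(model.parameters())
#   for batch in loader:
#       loss = model(batch).mean()
#       loss.backward(); optimizer.step(); optimizer.zero_grad()
#       ema.step(model.parameters())       # update the shadow weights
#   ema.store(model.parameters())          # stash the live weights
#   ema.copy_to(model.parameters())        # evaluate with EMA weights
#   ema.restore(model.parameters())        # then swap the live weights back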
| 687 |
| 687 | 1 |
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental (its API may change)."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
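# Usage sketch: any callable can opt in to the warning.
#
#   @experimental
#   def new_api():
#       ...
#
#   new_api()  # warns: "'new_api' is experimental and might be subject to ..."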
| 76 |
'''simple docstring'''
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 334 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for sequence classification on HANS."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data, produced from an InputExample."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """simple docstring"""
    features: List[InputFeatures]
    def __init__( self , data_dir , tokenizer , task , max_seq_length = 1_28 , overwrite_cache=False , evaluate = False , ):
        '''simple docstring'''
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
        self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(lowerCAmelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
        self.dataset = tf.data.Dataset.from_generator(
            gen , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
    def get_dataset( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__( self , i ):
        '''simple docstring'''
        return self.features[i]
    def get_labels( self ):
'''simple docstring'''
return self.label_list
class HansProcessor( DataProcessor ):
    """simple docstring"""
    def get_train_examples( self , data_dir ):
        '''simple docstring'''
        return self._create_examples(self._read_tsv(os.path.join(data_dir , """heuristics_train_set.txt""" ) ) , """train""" )
    def get_dev_examples( self , data_dir ):
        '''simple docstring'''
        return self._create_examples(self._read_tsv(os.path.join(data_dir , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
    def get_labels( self ):
        '''simple docstring'''
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        '''simple docstring'''
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = """%s-%s""" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ):
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="""convert examples to features""" ):
        if ex_index % 1_0000 == 0:
            logger.info("""Writing example %d""" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="""max_length""" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
__snake_case: Tuple = {
"hans": 3,
}
__snake_case: Dict = {
"hans": HansProcessor,
}
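# Usage sketch for the PyTorch dataset above (hedged: the data directory is a
# placeholder and must contain the HANS heuristics_*.txt files):
#
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# dataset = HansDataset(
#     data_dir="./hans",
#     tokenizer=tokenizer,
#     task="hans",
#     max_seq_length=128,
#     evaluate=False,
# )
# print(len(dataset), dataset.get_labels())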
| 460 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = """ylacombe/bark-small"""
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = """en_speaker_1"""
        self.input_string = """This is a test string"""
        self.speaker_embeddings_dict_path = """speaker_embeddings_path.json"""
        self.speaker_embeddings_directory = """speaker_embeddings"""
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Optional[Any] = BarkProcessor(tokenizer=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
a_ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
a_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a_ : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
a_ : Dict = 35
a_ : List[Any] = 2
a_ : Optional[int] = 8
a_ : int = {
"""semantic_prompt""": np.ones(lowerCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
a_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase_ )
a_ : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
a_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase_ , **lowerCAmelCase_ )
a_ : Any = processor(text=self.input_string , voice_preset=lowerCAmelCase_ )
a_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
a_ : Any = processor(text=self.input_string , voice_preset=self.voice_preset )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.get_tokenizer()
a_ : Union[str, Any] = BarkProcessor(tokenizer=lowerCAmelCase_ )
a_ : Optional[int] = processor(text=self.input_string )
a_ : Optional[int] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
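# A minimal standalone sketch of the processor exercised by these tests (hedged:
# the checkpoint and voice preset mirror the fixtures above and may change on the Hub):
#
# from transformers import BarkProcessor
# processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
# inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
# print(inputs["input_ids"].shape)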
| 460 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a_ ( _a ):
@slow
@require_torch
def UpperCamelCase_ ( self ):
_lowercase = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
_lowercase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_lowercase = bertabert.config.encoder.vocab_size
_lowercase = tokenizer.sep_token_id
_lowercase = tokenizer.cls_token_id
_lowercase = 128
_lowercase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
_lowercase = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
_lowercase = train_dataset.select(range(32 ) )
_lowercase = val_dataset.select(range(16 ) )
_lowercase = 4
def _map_to_encoder_decoder_inputs(__UpperCamelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_lowercase = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__UpperCamelCase , max_length=512 )
_lowercase = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__UpperCamelCase , max_length=128 )
_lowercase = inputs.input_ids
_lowercase = inputs.attention_mask
_lowercase = outputs.input_ids
_lowercase = outputs.input_ids.copy()
_lowercase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_lowercase = outputs.attention_mask
assert all(len(__UpperCamelCase ) == 512 for x in inputs.input_ids )
assert all(len(__UpperCamelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCamelCase ):
_lowercase = pred.label_ids
_lowercase = pred.predictions
# all unnecessary tokens are removed
_lowercase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
_lowercase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
_lowercase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__UpperCamelCase ) )] ) / len(__UpperCamelCase )
return {"accuracy": accuracy}
# map train dataset
_lowercase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCamelCase , batch_size=__UpperCamelCase , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
_lowercase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__UpperCamelCase , batch_size=__UpperCamelCase , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
_lowercase = self.get_auto_remove_tmp_dir()
_lowercase = SeqaSeqTrainingArguments(
output_dir=__UpperCamelCase , per_device_train_batch_size=__UpperCamelCase , per_device_eval_batch_size=__UpperCamelCase , predict_with_generate=__UpperCamelCase , evaluation_strategy="""steps""" , do_train=__UpperCamelCase , do_eval=__UpperCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
_lowercase = SeqaSeqTrainer(
model=__UpperCamelCase , args=__UpperCamelCase , compute_metrics=_compute_metrics , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , tokenizer=__UpperCamelCase , )
# start training
        trainer.train()
| 287 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
    '''position_salaries.csv'''
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial():
    plt.scatter(X, y, color="""red""" )
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 287 | 1 |
'''simple docstring'''
_lowercase = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_lowercase = frozenset(["""prompt""", """negative_prompt"""])
_lowercase = frozenset([])
_lowercase = frozenset(["""image"""])
_lowercase = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_lowercase = frozenset(["""image"""])
_lowercase = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_lowercase = frozenset(["""prompt""", """image""", """negative_prompt"""])
_lowercase = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
_lowercase = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
_lowercase = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_lowercase = frozenset(["""image""", """mask_image"""])
_lowercase = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
_lowercase = frozenset(["""example_image""", """image""", """mask_image"""])
_lowercase = frozenset(["""class_labels"""])
_lowercase = frozenset(["""class_labels"""])
_lowercase = frozenset(["""batch_size"""])
_lowercase = frozenset([])
_lowercase = frozenset(["""batch_size"""])
_lowercase = frozenset([])
_lowercase = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
_lowercase = frozenset(["""prompt""", """negative_prompt"""])
_lowercase = frozenset(["""input_tokens"""])
_lowercase = frozenset(["""input_tokens"""])
| 427 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowercase = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg , hint=None ):
    require_version(deps[pkg] , hint )
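# `require_version` can also be called directly elsewhere; a small illustration (the
# requirement string is an arbitrary example, not one of the pinned deps above):
#
# require_version("numpy>=1.17", "Try: pip install -U numpy")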
| 427 | 1 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = "Hello, World!"
UpperCamelCase = "en_XX"
def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    data_dir = Path('data_bin' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(data_dir ) , bpe='sentencepiece' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
    xmod.eval() # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
_lowercase : int = xmod_sent_encoder.embed_tokens.weight
_lowercase : Union[str, Any] = xmod_sent_encoder.embed_positions.weight
_lowercase : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
_lowercase : str = xmod_sent_encoder.layernorm_embedding.weight
_lowercase : List[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowercase : str = model.roberta.encoder.layer[i]
_lowercase : Dict = xmod_sent_encoder.layers[i]
# self attention
_lowercase : List[Any] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
_lowercase : Optional[int] = xmod_layer.self_attn.q_proj.weight
_lowercase : Dict = xmod_layer.self_attn.q_proj.bias
_lowercase : int = xmod_layer.self_attn.k_proj.weight
_lowercase : str = xmod_layer.self_attn.k_proj.bias
_lowercase : Dict = xmod_layer.self_attn.v_proj.weight
_lowercase : int = xmod_layer.self_attn.v_proj.bias
# self-attention output
_lowercase : List[str] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
_lowercase : Any = xmod_layer.self_attn.out_proj.weight
_lowercase : Dict = xmod_layer.self_attn.out_proj.bias
_lowercase : Union[str, Any] = xmod_layer.self_attn_layer_norm.weight
_lowercase : Optional[int] = xmod_layer.self_attn_layer_norm.bias
# intermediate
_lowercase : Optional[int] = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
_lowercase : Optional[Any] = xmod_layer.fca.weight
_lowercase : Tuple = xmod_layer.fca.bias
# output
_lowercase : List[str] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
_lowercase : Tuple = xmod_layer.fca.weight
_lowercase : Optional[Any] = xmod_layer.fca.bias
_lowercase : Optional[int] = xmod_layer.final_layer_norm.weight
_lowercase : List[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
_lowercase : Optional[int] = xmod_layer.adapter_layer_norm.weight
_lowercase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
_lowercase : Dict = bert_output.adapter_modules[lang_code]
_lowercase : Tuple = xmod_layer.adapter_modules[lang_code]
_lowercase : int = from_adapter.fca.weight
_lowercase : str = from_adapter.fca.bias
_lowercase : int = from_adapter.fca.weight
_lowercase : Tuple = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
_lowercase : int = xmod_sent_encoder.layer_norm.weight
_lowercase : List[str] = xmod_sent_encoder.layer_norm.bias
if classification_head:
_lowercase : Dict = xmod.model.classification_heads['mnli'].dense.weight
_lowercase : int = xmod.model.classification_heads['mnli'].dense.bias
_lowercase : List[Any] = xmod.model.classification_heads['mnli'].out_proj.weight
_lowercase : Optional[int] = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
_lowercase : Union[str, Any] = xmod.model.encoder.lm_head.dense.weight
_lowercase : str = xmod.model.encoder.lm_head.dense.bias
_lowercase : Dict = xmod.model.encoder.lm_head.layer_norm.weight
_lowercase : Any = xmod.model.encoder.lm_head.layer_norm.bias
_lowercase : List[Any] = xmod.model.encoder.lm_head.weight
_lowercase : Dict = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCamelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
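# Example invocation, assuming this script is saved as convert_xmod.py (paths are
# placeholders):
#
#   python convert_xmod.py \
#       --xmod_checkpoint_path /path/to/xmod/model.pt \
#       --pytorch_dump_folder_path ./xmod-base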
| 66 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig( PretrainedConfig ):
    model_type = "upernet"
    def __init__( self , backbone_config=None , hidden_size=5_1_2 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=3_8_4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
        elif isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.get('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
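# Usage sketch (illustrative; UperNetForSemanticSegmentation is the companion model
# class in the transformers library):
#
# from transformers import UperNetForSemanticSegmentation
# config = UperNetConfig()  # falls back to the default ResNet backbone, per the logic above
# model = UperNetForSemanticSegmentation(config)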
| 66 | 1 |
from __future__ import annotations
class IIRFilter:
    def __init__(self , order : int ) -> None:
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self , a_coeffs : list[float] , b_coeffs : list[float] ) -> None:
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs )}' )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs )}' )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self , sample : float ) -> float:
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
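if __name__ == "__main__":
    # A short usage sketch of the filter above; the coefficients are illustrative,
    # not derived from any particular filter design.
    filt = IIRFilter(2)
    filt.set_coefficients(a_coeffs=[1.0, -1.8, 0.81], b_coeffs=[0.0025, 0.005, 0.0025])
    print([filt.process(x) for x in (1.0, 0.0, 0.0, 0.0)])  # first impulse-response samples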
| 703 |
def equation(x: float ) -> float:
    return 10 - x * x
def bisection(a: float , b: float ) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a ) * equation(b ) >= 0:
        raise ValueError('Wrong space!' )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 144 | 0 |
def sylvester(number: int ) -> int:
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 579 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'yolos'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=[512, 864] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=100 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
    @property
    def default_onnx_opset( self ):
        return 12
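# Usage sketch (illustrative; YolosForObjectDetection is the companion model class
# in the transformers library):
#
# from transformers import YolosForObjectDetection
# config = YolosConfig(num_detection_tokens=100)
# model = YolosForObjectDetection(config)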
| 579 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class A (__UpperCAmelCase ):
'''simple docstring'''
    def _create_dummy_dataset( self ) -> Dataset:
        """simple docstring"""
        dset = Dataset.from_dict({"""filename""": ["""my_name-train""" + """_""" + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def a_ ( self : List[str] ) -> int:
"""simple docstring"""
import faiss
A__ = self._create_dummy_dataset()
A__ = dset.map(
lambda __lowerCAmelCase , __lowerCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ )
A__ = dset.add_faiss_index("""vecs""" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
A__ , A__ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def a_ ( self : int ) -> Any:
"""simple docstring"""
import faiss
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
A__ , A__ = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def a_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
import faiss
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
A__ , A__ = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def a_ ( self : Any ) -> List[str]:
"""simple docstring"""
A__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(lowerCAmelCase_ , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def a_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
A__ = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A__ = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 30 )
A__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
A__ = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=lowerCAmelCase_ )
A__ , A__ = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class A (__UpperCAmelCase ):
'''simple docstring'''
def a_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
A__ = np.eye(5 , dtype=np.floataa )[::-1]
A__ , A__ = index.search_batch(lowerCAmelCase_ )
self.assertRaises(lowerCAmelCase_ , index.search_batch , queries[0] )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCAmelCase_ )
def a_ ( self : Any ) -> int:
"""simple docstring"""
import faiss
A__ = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
A__ = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase_ ):
A__ = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def a_ ( self : Tuple ) -> str:
"""simple docstring"""
import faiss
A__ = faiss.IndexFlat(5 )
A__ = FaissIndex(custom_index=lowerCAmelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def a_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
import faiss
A__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase_ ) as tmp_file:
index.save(tmp_file.name )
A__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
A__ = np.zeros(5 , dtype=np.floataa )
A__ = 1
A__ , A__ = index.search(lowerCAmelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs ):
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = """index.faiss"""
    path = F'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[0] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class A (__UpperCAmelCase ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
A__ = Elasticsearch()
A__ = {"""acknowledged""": True}
A__ = ElasticSearchIndex(es_client=lowerCAmelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
A__ = """foo"""
A__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
A__ , A__ = index.search(lowerCAmelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
A__ = """foo"""
A__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
A__ , A__ = index.search(lowerCAmelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
A__ = ["""foo""", """bar""", """foobar"""]
A__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
A__ , A__ = index.search_batch(lowerCAmelCase_ )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
# batched queries with timeout
A__ = ["""foo""", """bar""", """foobar"""]
A__ = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
A__ , A__ = index.search_batch(lowerCAmelCase_ , request_timeout=30 )
A__ = [scores[0] for scores in total_scores]
A__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase_ )
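# A compact end-to-end sketch of the FAISS path exercised above (requires `faiss-cpu`;
# the values are illustrative):
#
# import faiss
# dset = Dataset.from_dict({"text": [f"doc-{i}" for i in range(30)]})
# dset = dset.map(lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True)
# dset.add_faiss_index("vecs", metric_type=faiss.METRIC_INNER_PRODUCT)
# scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
# print(examples["text"][0])  # "doc-29": the largest inner product with the query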
| 712 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A : Any = 1_6
A : List[Any] = 3_2
def get_dataloaders( accelerator :Accelerator , batch_size :int = 1_6 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A : Optional[Any] = mocked_dataloaders # noqa: F811
def training_function( config , args ):
    """simple docstring"""
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps )
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=gradient_accumulation_steps )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""" , type=int , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
    training_function(config , args )
if __name__ == "__main__":
main()
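# Typical invocation via the accelerate launcher (a sketch; the distributed setup
# comes from `accelerate config`, and the flags map to the argparse arguments above):
#
#   accelerate launch this_script.py --gradient_accumulation_steps 2 --mixed_precision fp16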
| 247 | 0 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax( nn.Module ):
    def __init__( self ,n_token ,d_embed ,d_proj ,cutoffs ,div_val=1 ,keep_order=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ : Any = n_token
SCREAMING_SNAKE_CASE_ : Tuple = d_embed
SCREAMING_SNAKE_CASE_ : int = d_proj
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cutoffs + [n_token]
SCREAMING_SNAKE_CASE_ : Dict = [0] + self.cutoffs
SCREAMING_SNAKE_CASE_ : int = div_val
SCREAMING_SNAKE_CASE_ : Tuple = self.cutoffs[0]
SCREAMING_SNAKE_CASE_ : str = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE_ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
SCREAMING_SNAKE_CASE_ : int = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE_ : int = nn.ModuleList()
SCREAMING_SNAKE_CASE_ : List[str] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case__ ,snake_case__ ) ) )
else:
self.out_projs.append(snake_case__ )
self.out_layers.append(nn.Linear(snake_case__ ,snake_case__ ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ : List[str] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(snake_case__ ,snake_case__ ) ) )
self.out_layers.append(nn.Linear(snake_case__ ,r_idx - l_idx ) )
SCREAMING_SNAKE_CASE_ : Any = keep_order
    def _compute_logit( self ,hidden ,weight ,bias ,proj ):
        if proj is None:
            logit = nn.functional.linear(hidden ,weight ,bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden ,proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid ,weight ,bias=bias )
            # else:
            # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            # if bias is not None:
            # logit = logit + bias
        return logit
    def forward( self ,hidden ,labels=None ,keep_order=False ):
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE_ : Dict = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden.view(-1 ,hidden.size(-1 ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE_ : str = self._compute_logit(snake_case__ ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE_ : Tuple = labels != -100
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.zeros_like(snake_case__ ,dtype=hidden.dtype ,device=hidden.device )
SCREAMING_SNAKE_CASE_ : int = (
-nn.functional.log_softmax(snake_case__ ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.functional.log_softmax(snake_case__ ,dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ : Any = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE_ : List[Any] = self.out_layers[i].weight
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(snake_case__ )
biases.append(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.functional.log_softmax(snake_case__ ,dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE_ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.zeros_like(snake_case__ ,dtype=hidden.dtype ,device=hidden.device )
SCREAMING_SNAKE_CASE_ : List[str] = 0
SCREAMING_SNAKE_CASE_ : str = [0] + self.cutoffs
for i in range(len(snake_case__ ) - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE_ : List[str] = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE_ : Tuple = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE_ : Any = labels.index_select(0 ,snake_case__ ) - l_idx
SCREAMING_SNAKE_CASE_ : List[str] = head_logprob.index_select(0 ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = hidden.index_select(0 ,snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : Any = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE_ : List[str] = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE_ : Any = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE_ : int = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = nn.functional.log_softmax(snake_case__ ,dim=1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE_ : Dict = logprob_i
if labels is not None:
if (hasattr(self ,'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 ,snake_case__ ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def log_prob( self ,hidden ):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
            return nn.functional.log_softmax(logit ,dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE_ : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE_ : int = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE_ : Any = self.out_layers[i].weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE_ : str = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(snake_case__ )
biases.append(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE_ : str = nn.functional.log_softmax(snake_case__ ,dim=1 )
SCREAMING_SNAKE_CASE_ : Any = [0] + self.cutoffs
for i in range(len(snake_case__ ) - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE_ : List[Any] = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE_ : Optional[int] = self._compute_logit(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = nn.functional.log_softmax(snake_case__ ,dim=1 )
SCREAMING_SNAKE_CASE_ : Any = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE_ : Dict = logprob_i
return out
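# Usage sketch for the adaptive softmax above (illustrative sizes):
#
# crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
# hidden = torch.randn(4, 8, 32)              # (batch, seq_len, d_proj)
# labels = torch.randint(0, 1000, (4, 8))
# nll = crit(hidden, labels)                  # flattened per-token negative log-likelihoods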
| 105 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
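    # Example invocation (hypothetical script and checkpoint names, shown purely
    # for illustration):
    #
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
    #       --original_config_file ./v1-inference.yaml \
    #       --dump_path ./stable-diffusion-v1-5-diffusers \
    #       --half --to_safetensors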
| 399 | 0 |
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character with a fresh pseudo-random key."""
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert encrypt() character by character."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
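# Why decryption inverts encryption (a quick check, not part of the original
# module): encrypt() computes c = (p + k) * k = p*k + k**2, so
# p = (c - k**2) / k, which is exactly what decrypt() evaluates before chr().
#
#   >>> c, k = Onepad().encrypt("Hello")
#   >>> Onepad().decrypt(c, k)
#   'Hello'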
| 712 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class holding the rendered images."""

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"""cuda:{gpu_id}""")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # drop the CLS token: batch_size, 256, dim
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and conditional embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}""")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 290 | 0 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
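# Example (illustrative only; IMDb's markup changes over time, so the selectors
# above may need updating before this works against the live site):
#
#   >>> movies = get_imdb_top_250_movies()
#   >>> next(iter(movies.items()))  # e.g. ('The Shawshank Redemption', 9.2)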
| 62 | """simple docstring"""
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
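# Example invocation (hypothetical file names, for illustration only):
#
#   python parse_dpr_relevance_data.py \
#       --src_path biencoder-nq-dev.json \
#       --evaluation_set nq-dev.questions \
#       --gold_data_path nq-dev.gold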
| 599 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
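# The module above follows the Transformers lazy-import pattern: `_import_structure`
# maps each submodule to the names it exports, and `_LazyModule` replaces this
# module in `sys.modules` so heavy backends (torch, tokenizers) are only imported
# on first attribute access. A minimal sketch of the idea (hypothetical names,
# not part of this file):
#
#   _import_structure = {"configuration_mvp": ["MvpConfig"]}
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
#   # `import mvp; mvp.MvpConfig` now triggers the real submodule import lazily.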
| 708 | """simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # masked mean pooling over the token embeddings
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
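# A minimal usage sketch (the checkpoint id below is an assumption for
# illustration; substitute whichever M-CLIP release you actually use):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")  # assumed id
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")   # assumed id
#   batch = tokenizer(["une photo d'un chat"], return_tensors="pt", padding=True)
#   projected, pooled = model(batch["input_ids"], batch["attention_mask"])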
| 536 | 0 |
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """
    For each index, compute the length of the longest substring starting there
    that is also a prefix of the whole string.
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match starting at index i can be extended."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` via the z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than or equal to the pattern length,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
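# Worked example (easy to verify by hand):
#
#   >>> z_function("abracadabra")
#   [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
#   >>> find_pattern("abr", "abracadabra")
#   2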
| 229 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):

    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs['prompt'] = 3 * [inputs['prompt']]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop('prompt')]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt, padding='max_length', max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors='pt',)
        text_inputs = text_inputs['input_ids'].to(torch_device)

        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]

        inputs['prompt_embeds'] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])

        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d')
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d').to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c').to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 229 | 1 |
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location='''cpu''')
    args = Namespace(**checkpoint['''cfg''']['''model'''])
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]

    state_dict = {key.replace('''decoder''', '''model'''): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''gelu''', scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
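# Example invocation (hypothetical paths, for illustration only):
#
#   python convert_xglm_original_ckpt_to_trfms.py ./xglm-564M/model.pt ./xglm-564M-hf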
| 703 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='''cpu''')['''model''']

    # pop unnecessary weights
    keys_to_delete = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('''.qkv_proj.''', '''.q_proj.''')
            k_name = key.replace('''.qkv_proj.''', '''.k_proj.''')
            v_name = key.replace('''.qkv_proj.''', '''.v_proj.''')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    args = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
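    # Example invocation (hypothetical paths, for illustration only):
    #
    #   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fairseq_path ./opt-1.3b/restored.pt --pytorch_dump_folder_path ./opt-1.3b-hf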
| 340 | 0 |
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        """Print each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge, creating the source vertex if needed."""
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Visit every vertex with a depth-first traversal."""
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
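# Note: dfs() indexes `visited` by vertex label, so it assumes vertices are
# labelled 0..n-1 with every label present as a key (true for the example
# above). For arbitrary hashable labels, a dict-based visited set would be
# the safer variant, e.g.:
#
#   visited = {v: False for v in self.vertex}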
| 15 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['stem'] + [f"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
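# A minimal usage sketch (tiny, made-up hyperparameters for illustration only):
#
#   config = FocalNetConfig(embed_dim=48, depths=[1, 1], out_features=["stage1"])
#   print(config.stage_names)  # ['stem', 'stage1', 'stage2']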
| 383 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system `matrix @ x = vector` by Gaussian elimination with
    partial pivoting, returning x as a column matrix.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Fit the minimal-degree polynomial through (1, y_1), (2, y_2), ... and
    return it as a callable.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from the problem."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimal polynomial fits of
    increasing order.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
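# Quick sanity check on a toy sequence (illustrative): u(n) = n**3 generates
# 1, 8, 27, ...; fitting only the first two terms gives the line through
# (1, 1) and (2, 8), i.e. 7n - 6, whose next value 15 != 27 is the first
# incorrect term that solution() would sum for that fit.
#
#   >>> interpolate([1, 8])(3)
#   15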
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 557 | 0 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 512 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""")

        self.register_modules(
            speech_model=speech_model, speech_processor=speech_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="""pt""", sampling_rate=sampling_rate).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.''')

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="""max_length""", max_length=self.tokenizer.model_max_length, return_tensors="""pt""", )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''')
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""""""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='''
                    f''' {type(prompt)}.''')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="""max_length""", max_length=max_length, truncation=True, return_tensors="""pt""", )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="""cpu""", dtype=latents_dtype).to(
                    self.device)
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["""eta"""] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
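# A minimal usage sketch for the pipeline above (the model ids and the
# community-pipeline name are assumptions for illustration; any Whisper
# checkpoint plus a Stable Diffusion checkpoint would do):
#
#   import torch
#   from datasets import load_dataset
#
#   ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
#   sample = ds[0]["audio"]
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline name
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe(sample["array"], sample["sampling_rate"]).images[0]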
| 26 | 0 |
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find('patch')
    patch_size = int(model_name[start_idx + len('patch') : start_idx + len('patch') + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
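# A sanity check of the mapping above, traced by hand (example key assumed from
# the CLIP-style checkpoint layout):
#   rename_key("transformer.resblocks.0.ln_1.weight")
#   == "text_model.encoder.layers.0.layer_norm1.weight"
# ("ln_1" -> "layer_norm1" fires first, then the "transformer.resblocks" prefix
# is remapped onto the HF text encoder).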
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                # the fused in_proj tensor stacks q, k and v along dim 0, so it is sliced into thirds
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
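# Shape bookkeeping for the q/k/v split above: a fused "attn.in_proj" weight has
# shape (3 * dim, dim), so rows [:dim] become q_proj, [dim : 2 * dim] become
# k_proj and the last third (val[-dim:]) becomes v_proj; the fused bias of shape
# (3 * dim,) is sliced the same way.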
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
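# hf_hub_download caches the clip locally and returns the file path; the
# "eating_spaghetti*" files are presumed to hold uint8 frames stacked as
# (num_frames, height, width, 3), which np.load then turns into a list of frames.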
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]

    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    # position_ids are re-created at load time, so these two missing keys are expected
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # verify outputs: logits_per_video has shape (1, 3), one score per text prompt
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
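# Example invocation (the script filename here is an assumption):
# python convert_x_clip_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name xclip-base-patch32 \
#     --pytorch_dump_folder_path ./xclip-base-patch32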
| 137 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 137 | 1 |