| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase__ = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def _lowerCamelCase ( __a = "mumbai" ):
SCREAMING_SNAKE_CASE_ = BeautifulSoup(requests.get(url + location ).content, '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''} ):
SCREAMING_SNAKE_CASE_ = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
SCREAMING_SNAKE_CASE_ = job.find('''span''', {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''') | 628 |
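A minimal consumption sketch: since `fetch_jobs` is a generator, `itertools.islice` can cap the number of scraped rows without walking the whole page. The location value is hypothetical, and the selectors above depend on Indeed's current markup, which is not guaranteed to be stable.

```python
from itertools import islice

# Take only the first five postings without exhausting the generator.
for title, company in islice(fetch_jobs("Delhi"), 5):
    print(f"{title} -- {company}")
```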
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 | 1 |
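A short usage sketch, assuming the public `transformers` API for this config and the `from_backbone_and_decoder_configs` classmethod defined above. The Swin hyperparameters are illustrative, not from any checkpoint.

```python
from transformers import DetrConfig, MaskFormerConfig, SwinConfig

# Compose a config from explicit backbone/decoder configs instead of the
# Swin/DETR fallbacks baked into __init__.
backbone_config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
decoder_config = DetrConfig()
config = MaskFormerConfig.from_backbone_and_decoder_configs(backbone_config, decoder_config)
print(config.num_attention_heads)  # copied from the DETR decoder's encoder_attention_heads
```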
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
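A sketch of what the `_LazyModule` indirection buys, assuming an installed `transformers` with this model: importing the package is cheap, and the torch-backed submodule is only loaded on first attribute access.

```python
import importlib

gpt_bigcode = importlib.import_module("transformers.models.gpt_bigcode")  # no torch-heavy import yet
config_cls = gpt_bigcode.GPTBigCodeConfig  # __getattr__ triggers the real submodule import
print(config_cls().model_type)
```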
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 1 |
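Beyond the argparse entry point, the converter can be called directly; a hedged sketch with a made-up output path:

```python
# Downloads the original checkpoint, remaps keys with rename_key, verifies
# logits against the hardcoded slices, then saves to a local folder.
convert_focalnet_checkpoint(
    model_name="focalnet-tiny",
    pytorch_dump_folder_path="./focalnet-tiny-converted",  # hypothetical path
    push_to_hub=False,
)
```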
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case ( unittest.TestCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
def _lowercase (self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = DPTImageProcessor if is_vision_available() else None
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = DPTImageProcessingTester(self )
@property
def _lowercase (self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , ) | 628 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 628 | 1 |
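A usage sketch, assuming the public `transformers` classes for GLPN:

```python
from transformers import GLPNConfig, GLPNForDepthEstimation

# Randomly initialised model from the defaults above; no weights downloaded.
config = GLPNConfig(decoder_hidden_size=64, max_depth=10)
model = GLPNForDepthEstimation(config)
print(sum(p.numel() for p in model.parameters()), "parameters")
```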
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__a, max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 628 |
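A small sanity check on the counting logic, with the arithmetic verified by hand: perimeter 120 has exactly three distinct integer right triangles, 30-40-50, 20-48-52 and 24-45-51.

```python
counts = pythagorean_triple(130)
assert counts[120] == 3  # 30-40-50, 20-48-52, 24-45-51
```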
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
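Condensed from the integration test above into a usage sketch. It is written against the public diffusers names (KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline) that the snippet's aliases appear to correspond to; the zero tensor merely stands in for a real depth hint, and a CUDA device is assumed.

```python
import torch
from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

image_embeds, negative_image_embeds = prior("A robot, 4k photo", num_inference_steps=25).to_tuple()
hint = torch.zeros(1, 3, 512, 512, dtype=torch.float16, device="cuda")  # placeholder depth map
image = decoder(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    hint=hint,
    num_inference_steps=50,
    output_type="np",
).images[0]
```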
"""simple docstring"""
def _lowerCamelCase ( __a = 10 ):
if not isinstance(__a, __a ) or n < 0:
raise ValueError('''Invalid input''' )
SCREAMING_SNAKE_CASE_ = 10**n
SCREAMING_SNAKE_CASE_ = 28_433 * (pow(2, 7_830_457, __a )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''') | 628 |
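The whole trick here is Python's three-argument `pow`, which performs modular exponentiation in O(log e) multiplications instead of materialising the multi-million-digit value of 2**7830457:

```python
# Small case where both sides are cheap enough to compare directly.
assert pow(7, 100, 13) == (7 ** 100) % 13
```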
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 | 1 |
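A smoke-test sketch with a tiny, randomly initialised config; every hyperparameter value below is illustrative rather than taken from a real checkpoint.

```python
import torch

config = RobertaSeriesConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=2,
    intermediate_size=64, project_dim=16,
)
model = RobertaSeriesModelWithTransformation(config)
input_ids = torch.randint(0, 100, (1, 8))
output = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
print(output.projection_state.shape)  # torch.Size([1, 8, 16])
```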
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
image=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 628 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase ( __a, __a=0.9_9_9, __a="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ = []
for i in range(__a ):
SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ), __a ) )
return torch.tensor(__a, dtype=torch.floataa )
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps | 628 | 1 |
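A sketch of the sampling loop this scheduler is driven by. It matches diffusers' HeunDiscreteScheduler, used below under that public name, and the zero tensor stands in for a real UNet prediction.

```python
import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(num_inference_steps=10, device="cpu")

sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet call
    sample = scheduler.step(noise_pred, t, sample).prev_sample  # alternates 1st/2nd-order passes
```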
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _lowerCamelCase ( __a, __a ):
assert isinstance(__a, __a )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, cache_dir=__a, keep_in_memory=__a ).read()
_check_json_dataset(__a, __a )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, features=__a, cache_dir=__a ).read()
_check_json_dataset(__a, __a )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
], )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
SCREAMING_SNAKE_CASE_ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, features=__a, cache_dir=__a ).read()
assert isinstance(__a, __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _lowerCamelCase ( __a, __a ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
SCREAMING_SNAKE_CASE_ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
SCREAMING_SNAKE_CASE_ = features.copy()
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, features=__a, cache_dir=__a ).read()
assert isinstance(__a, __a )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, cache_dir=__a, split=__a ).read()
_check_json_dataset(__a, __a )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''', [str, list] )
def _lowerCamelCase ( __a, __a, __a ):
if issubclass(__a, __a ):
SCREAMING_SNAKE_CASE_ = jsonl_path
elif issubclass(__a, __a ):
SCREAMING_SNAKE_CASE_ = [jsonl_path]
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, cache_dir=__a ).read()
_check_json_dataset(__a, __a )
def _lowerCamelCase ( __a, __a, __a=("train",) ):
assert isinstance(__a, __a )
for split in splits:
SCREAMING_SNAKE_CASE_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=__a, keep_in_memory=__a ).read()
_check_json_datasetdict(__a, __a )
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ = (
Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ = JsonDatasetReader({'''train''': jsonl_path}, features=__a, cache_dir=__a ).read()
_check_json_datasetdict(__a, __a )
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def _lowerCamelCase ( __a, __a, __a ):
if split:
SCREAMING_SNAKE_CASE_ = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE_ = '''train'''
SCREAMING_SNAKE_CASE_ = {'''train''': jsonl_path, '''test''': jsonl_path}
SCREAMING_SNAKE_CASE_ = tmp_path / '''cache'''
SCREAMING_SNAKE_CASE_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
SCREAMING_SNAKE_CASE_ = JsonDatasetReader(__a, cache_dir=__a ).read()
_check_json_datasetdict(__a, __a, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _lowerCamelCase ( __a ):
return json.load(__a )
def _lowerCamelCase ( __a ):
return [json.loads(__a ) for line in buffer]
class snake_case :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ = load_json_function(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ , orient=SCREAMING_SNAKE_CASE_ ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ = load_json(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE_ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ = load_json_function(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert isinstance(exported_content[0] , SCREAMING_SNAKE_CASE_ )
assert len(SCREAMING_SNAKE_CASE_ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , lines=SCREAMING_SNAKE_CASE_ , orient=SCREAMING_SNAKE_CASE_ , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE_ = load_json(SCREAMING_SNAKE_CASE_ )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(SCREAMING_SNAKE_CASE_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(SCREAMING_SNAKE_CASE_ ) == 10
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('''data''' ) / f'test.json.{extension}'
SCREAMING_SNAKE_CASE_ = str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , compression=SCREAMING_SNAKE_CASE_ ).write()
with fsspec.open(SCREAMING_SNAKE_CASE_ , '''rb''' , compression='''infer''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
with fsspec.open(SCREAMING_SNAKE_CASE_ , '''rb''' , compression='''infer''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
assert exported_content == original_content | 628 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc-token-related keys in the output dictionary. | 628 | 1 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
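# Kahn's algorithm: count each vertex's indegree, repeatedly pop zero-indegree
# vertices from a queue, and append them to the topological order; if fewer
# than len(graph) vertices get popped, the graph must contain a cycle.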
SCREAMING_SNAKE_CASE_ = [0] * len(__a )
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__a ) ):
if indegree[i] == 0:
queue.append(__a )
while queue:
SCREAMING_SNAKE_CASE_ = queue.pop(0 )
cnt += 1
topo.append(__a )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__a )
if cnt != len(__a ):
print('''Cycle exists''' )
else:
print(__a )
# Adjacency List of Graph
lowerCAmelCase__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph) | 628 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
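# building the tree costs O(n); update and query_range then take O(log n) each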
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 628 | 1 |
"""simple docstring"""
def _lowerCamelCase ( __a = 50 ):
SCREAMING_SNAKE_CASE_ = [1] * (length + 1)
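# ways_number[n] counts tilings of a row of length n with unit squares plus
# tiles of length 2, 3 or 4: fixing the leftmost tile (length tile_length,
# offset tile_start) leaves a suffix of length n - tile_start - tile_length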
for row_length in range(length + 1 ):
for tile_length in range(2, 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''') | 628 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = 1
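# iteratively apply the Catalan recurrence C(k) = C(k-1) * (4k - 2) / (k + 1),
# so this returns the n-th Catalan number (1-indexed: 1, 1, 2, 5, 14, ...)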
for i in range(1, __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 1 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
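# n is pentagonal iff k = (1 + sqrt(1 + 24n)) / 6 is a positive integer,
# obtained by solving P(k) = k(3k - 1) / 2 = n for k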
SCREAMING_SNAKE_CASE_ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def _lowerCamelCase ( __a = 5_000 ):
SCREAMING_SNAKE_CASE_ = [(i * (3 * i - 1)) // 2 for i in range(1, __a )]
for i, pentagonal_i in enumerate(__a ):
for j in range(__a, len(__a ) ):
SCREAMING_SNAKE_CASE_ = pentagonal_nums[j]
SCREAMING_SNAKE_CASE_ = pentagonal_i + pentagonal_j
SCREAMING_SNAKE_CASE_ = pentagonal_j - pentagonal_i
if is_pentagonal(__a ) and is_pentagonal(__a ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''') | 628 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( __a, __a ):
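# classic two-pointer scan: assumes `nums` is sorted in non-decreasing order;
# move the left index up when the pair sum is too small and the right index
# down when it is too large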
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__a ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE_ = i + 1
else:
SCREAMING_SNAKE_CASE_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''') | 628 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a=2, __a=3, __a=16, __a = 10, __a = 2 ):
def get_dataset(__a ):
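# synthetic linear-regression data: targets follow y = a * x + b plus Gaussian noise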
SCREAMING_SNAKE_CASE_ = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__a, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=None ):
SCREAMING_SNAKE_CASE_ = []
for epoch in range(__a ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = batch
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(__a, __a )
accelerator.backward(__a )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class snake_case ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 628 | 1 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# Don't modify the user's data, should they want to reuse it (e.g. in tests), because once we
# have modified it, it will not be accepted here again, since `auto` values would have been overridden
SCREAMING_SNAKE_CASE_ = deepcopy(SCREAMING_SNAKE_CASE_ )
elif os.path.exists(SCREAMING_SNAKE_CASE_ ):
with io.open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(SCREAMING_SNAKE_CASE_ )
else:
try:
SCREAMING_SNAKE_CASE_ = base64.urlsafe_b64decode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
SCREAMING_SNAKE_CASE_ = json.loads(SCREAMING_SNAKE_CASE_ )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
SCREAMING_SNAKE_CASE_ = config
self.set_stage_and_offload()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_value('''zero_optimization.stage''' , -1 )
# offload
SCREAMING_SNAKE_CASE_ = False
if self.is_zero2() or self.is_zero3():
SCREAMING_SNAKE_CASE_ = set(['''cpu''', '''nvme'''] )
SCREAMING_SNAKE_CASE_ = set(
[
self.get_value('''zero_optimization.offload_optimizer.device''' ),
self.get_value('''zero_optimization.offload_param.device''' ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
SCREAMING_SNAKE_CASE_ = True
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.config
# find the config node of interest if it exists
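# e.g. "zero_optimization.stage" walks config["zero_optimization"] and returns
# that node together with the final key "stage"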
SCREAMING_SNAKE_CASE_ = ds_key_long.split('''.''' )
SCREAMING_SNAKE_CASE_ = nodes.pop()
for node in nodes:
SCREAMING_SNAKE_CASE_ = config.get(SCREAMING_SNAKE_CASE_ )
if config is None:
return None, ds_key
return config, ds_key
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.find_config_node(SCREAMING_SNAKE_CASE_ )
if config is None:
return default
return config.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.config
# find the config node of interest if it exists
SCREAMING_SNAKE_CASE_ = ds_key_long.split('''.''' )
for node in nodes:
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = config.get(SCREAMING_SNAKE_CASE_ )
if config is None:
if must_exist:
raise ValueError(f'Can\'t find {ds_key_long} entry in the config: {self.config}' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_value(SCREAMING_SNAKE_CASE_ )
return False if value is None else bool(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_value(SCREAMING_SNAKE_CASE_ )
return False if value is None else not bool(SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
return self._stage == 2
def _lowercase (self ):
"""simple docstring"""
return self._stage == 3
def _lowercase (self ):
"""simple docstring"""
return self._offload
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = engine
def _lowercase (self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.engine.backward(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases, thus enabling a simple
# training loop that works transparently under many training regimes.
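# A minimal usage sketch (illustrative only: the wrapper name, `engine`, `model`
# and `batch` are assumptions, with `engine` coming from deepspeed.initialize):
#
# wrapper = DeepSpeedEngineWrapper(engine)
# loss = model(**batch).loss
# wrapper.backward(loss) # also runs optimizer.step() and zero_grad() via engine.step()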
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ , device_placement=SCREAMING_SNAKE_CASE_ , scaler=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = hasattr(self.optimizer , '''overflow''' )
def _lowercase (self , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _lowercase (self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _lowercase (self ):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0.0_01 , SCREAMING_SNAKE_CASE_=0 , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = params
SCREAMING_SNAKE_CASE_ = lr
SCREAMING_SNAKE_CASE_ = weight_decay
SCREAMING_SNAKE_CASE_ = kwargs
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0 , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = optimizer
SCREAMING_SNAKE_CASE_ = total_num_steps
SCREAMING_SNAKE_CASE_ = warmup_num_steps
SCREAMING_SNAKE_CASE_ = kwargs | 628 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter image url: ').strip()
print(f'''Downloading image from {url} ...''')
lowerCAmelCase__ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
lowerCAmelCase__ = soup.find('meta', {'property': 'og:image'})['content']
lowerCAmelCase__ = requests.get(image_url).content
lowerCAmelCase__ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'''
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
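# A self-contained sketch of the og:image lookup used above, run on a hand-written
# page so no network access is needed. Note the installable package is bs4; the
# import above is written as "bsa" by the corpus renaming.
from bs4 import BeautifulSoup
html = '<html><head><meta property="og:image" content="https://example.com/pic.jpg"></head></html>'
soup = BeautifulSoup(html, 'html.parser')
image_url = soup.find('meta', {'property': 'og:image'})['content']
assert image_url == 'https://example.com/pic.jpg'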
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = TransfoXLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _lowerCamelCase ( __a, __a=False ):
SCREAMING_SNAKE_CASE_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'module.blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'module.blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'module.blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'module.blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'module.blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'module.blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _lowerCamelCase ( __a, __a, __a=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_ = ''''''
else:
SCREAMING_SNAKE_CASE_ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'module.blocks.{i}.attn.qkv.weight' )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'module.blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_ = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_ = in_proj_bias[-config.hidden_size :]
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__a, __a )
def _lowerCamelCase ( __a ):
# projection head is used in the self-supervised pre-training in MSN;
# for downstream tasks it's not needed.
SCREAMING_SNAKE_CASE_ = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
for k in ignore_keys:
state_dict.pop(__a, __a )
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = dct.pop(__a )
SCREAMING_SNAKE_CASE_ = val
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = ViTMSNConfig()
SCREAMING_SNAKE_CASE_ = 1_000
SCREAMING_SNAKE_CASE_ = '''datasets/huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 384
SCREAMING_SNAKE_CASE_ = 1_536
SCREAMING_SNAKE_CASE_ = 6
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 1_024
SCREAMING_SNAKE_CASE_ = 4_096
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 4
elif "l7" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = 1_024
SCREAMING_SNAKE_CASE_ = 4_096
SCREAMING_SNAKE_CASE_ = 24
SCREAMING_SNAKE_CASE_ = 16
SCREAMING_SNAKE_CASE_ = 0.1
SCREAMING_SNAKE_CASE_ = ViTMSNModel(__a )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__a, map_location='''cpu''' )['''target_encoder''']
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(size=config.image_size )
remove_projection_head(__a )
SCREAMING_SNAKE_CASE_ = create_rename_keys(__a, base_model=__a )
for src, dest in rename_keys:
rename_key(__a, __a, __a )
read_in_q_k_v(__a, __a, base_model=__a )
model.load_state_dict(__a )
model.eval()
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor(
size=config.image_size, image_mean=__a, image_std=__a )
SCREAMING_SNAKE_CASE_ = image_processor(images=__a, return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3], __a, atol=1E-4 )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase__ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
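# A small sketch of the q/k/v split performed in read_in_q_k_v above: timm stores one
# fused (3 * hidden, hidden) projection, while the HF layout expects separate
# query/key/value matrices. Concatenating the three slices back must reproduce the
# fused tensor exactly.
import torch
hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)
q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : hidden_size * 2]
v_b = in_proj_bias[-hidden_size:]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)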
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__a, max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
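# A quick brute-force cross-check of the counting logic above, using the known
# example from Project Euler 39: perimeter 120 has exactly three integer
# right-triangle solutions, {20, 48, 52}, {24, 45, 51} and {30, 40, 50}.
def count_solutions(p):
    count = 0
    for a in range(1, p):
        for b in range(a, p - a):
            c = p - a - b
            if a * a + b * b == c * c:
                count += 1
    return count
assert count_solutions(120) == 3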
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
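# A usage sketch cross-checking the range-splitting logic above against Python's
# built-in sum. It assumes the two classes keep their original names,
# SegmentTreeNode and SegmentTree, which the __main__ block above already relies on.
import operator
data = [3, 1, 4, 1, 5, 9, 2, 6]
tree = SegmentTree(data, operator.add)
for i in range(len(data)):
    for j in range(i, len(data)):
        assert tree.query_range(i, j) == sum(data[i : j + 1])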
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
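# A standalone sketch of the port probe above: bind an ephemeral listening socket,
# then confirm connect_ex() returns 0 (success) for a port that is in use.
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:
    server.bind(('localhost', 0))
    server.listen(1)
    busy_port = server.getsockname()[1]
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        assert probe.connect_ex(('localhost', busy_port)) == 0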
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
SCREAMING_SNAKE_CASE_ = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).tokenizer
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).image_processor
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).qformer_tokenizer
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE_ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = processor(text=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = qformer_tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_image_processor()
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE_ = InstructBlipProcessor(
tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , qformer_tokenizer=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''lower newer'''
SCREAMING_SNAKE_CASE_ = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
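# The processor tested above namespaces the Q-Former tokenizer's output by prefixing
# its keys, which is why the expected key list contains both input_ids and
# qformer_input_ids; the prefixing itself amounts to:
qformer_encoding = {'input_ids': [[1, 2]], 'attention_mask': [[1, 1]]}
merged = {f'qformer_{k}': v for k, v in qformer_encoding.items()}
assert set(merged) == {'qformer_input_ids', 'qformer_attention_mask'}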
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
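# The "@@" suffix in the expected tokens above marks a non-final BPE subword;
# detokenization simply strips the marker and the following space, as this tiny
# round trip shows.
tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
assert ' '.join(tokens).replace('@@ ', '') == 'adapt react readapt apt'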
"""simple docstring"""
lowerCAmelCase__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowerCAmelCase__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowerCAmelCase__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowerCAmelCase__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowerCAmelCase__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowerCAmelCase__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
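# These hand-tuned inference schedules share two invariants a sampler's
# set_timesteps expects: strictly decreasing timesteps that terminate at 0. Since
# each list above rebinds the same name, only the last one is still reachable, so
# the check below runs on that one.
def check_schedule(timesteps):
    assert all(a > b for a, b in zip(timesteps, timesteps[1:]))
    assert timesteps[-1] == 0
check_schedule(lowerCAmelCase__)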
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = BigBirdConfig.from_json_file(__a )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE_ = BigBirdForQuestionAnswering(__a )
else:
SCREAMING_SNAKE_CASE_ = BigBirdForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__a, __a, is_trivia_qa=__a )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
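# A dry run of the CLI contract above without touching real checkpoints; the paths
# are placeholders, only the flag parsing is exercised.
import argparse
sketch_parser = argparse.ArgumentParser()
sketch_parser.add_argument('--tf_checkpoint_path', required=True)
sketch_parser.add_argument('--big_bird_config_file', required=True)
sketch_parser.add_argument('--pytorch_dump_path', required=True)
sketch_parser.add_argument('--is_trivia_qa', action='store_true')
sketch_args = sketch_parser.parse_args(
    ['--tf_checkpoint_path', 'ckpt/model.ckpt', '--big_bird_config_file', 'config.json', '--pytorch_dump_path', 'out/', '--is_trivia_qa']
)
assert sketch_args.is_trivia_qa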
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCAmelCase__ = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCAmelCase__ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowerCAmelCase__ = 'zero2'
lowerCAmelCase__ = 'zero3'
lowerCAmelCase__ = [ZEROa, ZEROa]
def _lowerCamelCase ( __a, __a, __a ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
SCREAMING_SNAKE_CASE_ = parameterized.to_safe_name('''_'''.join(str(__a ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
lowerCAmelCase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class snake_case ( __lowercase ):
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
pass
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = models[model]
SCREAMING_SNAKE_CASE_ = self.run_trainer(
stage=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , eval_steps=SCREAMING_SNAKE_CASE_ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
self.do_checks(SCREAMING_SNAKE_CASE_ )
return output_dir
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir('''./xxx''' , after=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(SCREAMING_SNAKE_CASE_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
SCREAMING_SNAKE_CASE_ = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
SCREAMING_SNAKE_CASE_ = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
SCREAMING_SNAKE_CASE_ = self.get_launcher(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=self.get_env() )
return output_dir
def _lowercase (self , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = min(2 , get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
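# The parameterization above is a plain Cartesian product: every ZeRO stage is
# paired with every model key, giving four sub-tests per decorated method.
import itertools
assert list(itertools.product(['zero2', 'zero3'], ['base', 'robust'])) == [
    ('zero2', 'base'), ('zero2', 'robust'), ('zero3', 'base'), ('zero3', 'robust'),
]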
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''', __a, )
if isinstance(__a, torch.Tensor ):
return image
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(__a ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return image
def _lowerCamelCase ( __a ):
if isinstance(__a, torch.Tensor ):
return mask
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = mask[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(mask[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
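# A hypothetical usage sketch for the pipeline above; the checkpoint id and file
# names are placeholders, not taken from this file. RePaint keeps the known region
# of `image` and resamples the region the mask marks for inpainting.
# pipe = RePaintPipeline.from_pretrained('google/ddpm-ema-celebahq-256')
# original = PIL.Image.open('face.png').resize((256, 256))
# mask = PIL.Image.open('mask.png').resize((256, 256))
# out = pipe(image=original, mask_image=mask, num_inference_steps=250, jump_length=10, jump_n_sample=10)
# out.images[0].save('inpainted.png')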
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCAmelCase__ = 'examples/'
lowerCAmelCase__ = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowerCAmelCase__ = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
lowerCAmelCase__ = 'README.md'
def _lowerCamelCase ( __a, __a, __a ):
with open(__a, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE_ = replace.replace('''VERSION''', __a )
SCREAMING_SNAKE_CASE_ = re_pattern.sub(__a, __a )
with open(__a, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.write(__a )
def _lowerCamelCase ( __a ):
for folder, directories, fnames in os.walk(__a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__a, __a ), __a, pattern='''examples''' )
def _lowerCamelCase ( __a, __a=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__a, __a, __a )
if not patch:
update_version_in_examples(__a )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = '''🤗 Transformers currently provides the following architectures'''
SCREAMING_SNAKE_CASE_ = '''1. Want to contribute a new model?'''
with open(__a, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
SCREAMING_SNAKE_CASE_ = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''', '''https://huggingface.co/docs/diffusers/model_doc''', )
index += 1
with open(__a, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(__a )
def _lowerCamelCase ( ):
with open(REPLACE_FILES['''init'''], '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
SCREAMING_SNAKE_CASE_ = REPLACE_PATTERNS['''init'''][0].search(__a ).groups()[0]
return packaging.version.parse(__a )
def _lowerCamelCase ( __a=False ):
SCREAMING_SNAKE_CASE_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE_ = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE_ = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
SCREAMING_SNAKE_CASE_ = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE_ = input(F'Which version are you releasing? [{default_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ = default_version
print(F'Updating version to {version}.' )
global_version_update(__a, patch=__a )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = get_version()
SCREAMING_SNAKE_CASE_ = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
SCREAMING_SNAKE_CASE_ = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE_ = input(F'Which version are we developing now? [{dev_version}]' )
if len(__a ) == 0:
SCREAMING_SNAKE_CASE_ = dev_version
print(F'Updating version to {version}.' )
global_version_update(__a )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowerCAmelCase__ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
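# The 'init' pattern above in action on sample text (not the real file): it captures
# the current version and lets global_version_update splice in a new one.
import re
sample = 'import os\n__version__ = "0.19.0.dev0"\n'
version_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
assert version_pattern.search(sample).groups()[0] == '0.19.0.dev0'
print(version_pattern.sub('__version__ = "0.20.0"', sample))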
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
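# simple_accuracy above is just elementwise agreement averaged over the batch:
import numpy as np
preds = np.array([0, 1, 2, 2])
labels = np.array([0, 1, 1, 2])
assert (preds == labels).mean() == 0.75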
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCamelCase ( __a, __a=False ):
SCREAMING_SNAKE_CASE_ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('''head''' ):
SCREAMING_SNAKE_CASE_ = '''segformer.encoder.''' + key
if key.startswith('''backbone''' ):
SCREAMING_SNAKE_CASE_ = key.replace('''backbone''', '''segformer.encoder''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
SCREAMING_SNAKE_CASE_ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
SCREAMING_SNAKE_CASE_ = key.replace(F'patch_embed{idx}', F'patch_embeddings.{int(__a )-1}' )
if "norm" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''norm''', '''layer_norm''' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
SCREAMING_SNAKE_CASE_ = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
SCREAMING_SNAKE_CASE_ = key.replace(F'layer_norm{idx}', F'layer_norm.{int(__a )-1}' )
if "layer_norm1" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''layer_norm1''', '''layer_norm_1''' )
if "layer_norm2" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''layer_norm2''', '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
SCREAMING_SNAKE_CASE_ = key[key.find('''block''' ) + len('''block''' )]
SCREAMING_SNAKE_CASE_ = key.replace(F'block{idx}', F'block.{int(__a )-1}' )
if "attn.q" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''attn.q''', '''attention.self.query''' )
if "attn.proj" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''attn.proj''', '''attention.output.dense''' )
if "attn" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''attn''', '''attention.self''' )
if "fc1" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''fc1''', '''dense1''' )
if "fc2" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''fc2''', '''dense2''' )
if "linear_pred" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''linear_pred''', '''classifier''' )
if "linear_fuse" in key:
SCREAMING_SNAKE_CASE_ = key.replace('''linear_fuse.conv''', '''linear_fuse''' )
SCREAMING_SNAKE_CASE_ = key.replace('''linear_fuse.bn''', '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
SCREAMING_SNAKE_CASE_ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
SCREAMING_SNAKE_CASE_ = key.replace(F'linear_c{idx}', F'linear_c.{int(__a )-1}' )
if key.startswith('''head''' ):
SCREAMING_SNAKE_CASE_ = key.replace('''head''', '''classifier''' )
SCREAMING_SNAKE_CASE_ = value
return new_state_dict
def _lowerCamelCase ( __a, __a ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
SCREAMING_SNAKE_CASE_ = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_ = kv_weight[
: config.hidden_sizes[i], :
]
SCREAMING_SNAKE_CASE_ = kv_bias[: config.hidden_sizes[i]]
SCREAMING_SNAKE_CASE_ = kv_weight[
config.hidden_sizes[i] :, :
]
SCREAMING_SNAKE_CASE_ = kv_bias[
config.hidden_sizes[i] :
]
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
return image
@torch.no_grad()
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = SegformerConfig()
SCREAMING_SNAKE_CASE_ = False
# set attributes based on model_name
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
if "segformer" in model_name:
SCREAMING_SNAKE_CASE_ = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
SCREAMING_SNAKE_CASE_ = 150
SCREAMING_SNAKE_CASE_ = '''ade20k-id2label.json'''
SCREAMING_SNAKE_CASE_ = (1, 150, 128, 128)
elif "city" in model_name:
SCREAMING_SNAKE_CASE_ = 19
SCREAMING_SNAKE_CASE_ = '''cityscapes-id2label.json'''
SCREAMING_SNAKE_CASE_ = (1, 19, 128, 128)
else:
raise ValueError(F'Model {model_name} not supported' )
elif "mit" in model_name:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_name[4:6]
SCREAMING_SNAKE_CASE_ = 1_000
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = (1, 1_000)
else:
raise ValueError(F'Model {model_name} not supported' )
# set config attributes
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = idalabel
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ = 256
elif size == "b2":
SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ = 768
SCREAMING_SNAKE_CASE_ = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ = 768
SCREAMING_SNAKE_CASE_ = [3, 4, 18, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ = 768
SCREAMING_SNAKE_CASE_ = [3, 8, 27, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE_ = [64, 128, 320, 512]
SCREAMING_SNAKE_CASE_ = 768
SCREAMING_SNAKE_CASE_ = [3, 6, 40, 3]
else:
raise ValueError(F'Size {size} not supported' )
# load image processor (only resize + normalize)
SCREAMING_SNAKE_CASE_ = SegformerImageProcessor(
image_scale=(512, 512), keep_ratio=__a, align=__a, do_random_crop=__a )
# prepare image
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=__a, return_tensors='''pt''' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
if encoder_only:
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location=torch.device('''cpu''' ) )
else:
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location=torch.device('''cpu''' ) )['''state_dict''']
# rename keys
SCREAMING_SNAKE_CASE_ = rename_keys(__a, encoder_only=__a )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__a, __a )
# create HuggingFace model and load state dict
if encoder_only:
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = SegformerForImageClassification(__a )
else:
SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation(__a )
model.load_state_dict(__a )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
SCREAMING_SNAKE_CASE_ = logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3], __a, atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 628 |
"""simple docstring"""
import cva
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = np.gradient(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
SCREAMING_SNAKE_CASE_ = 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
# Corner-response threshold; the value 0.5 can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 628 | 1 |
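# Editor's note: a minimal, self-contained NumPy sketch of the corner response
# computed above (r = det(M) - k * trace(M)**2, with M the windowed structure
# tensor). The 5x5 patches and k = 0.04 are illustrative assumptions only.
import numpy as np

def harris_response(patch, k=0.04):
    dy, dx = np.gradient(patch)  # image gradients along rows and columns
    wxx, wyy, wxy = (dx * dx).sum(), (dy * dy).sum(), (dx * dy).sum()
    return wxx * wyy - wxy**2 - k * (wxx + wyy) ** 2

corner = np.zeros((5, 5)); corner[:3, :3] = 255.0  # bright top-left block
edge = np.zeros((5, 5)); edge[:, :3] = 255.0  # vertical edge only
print(harris_response(corner) > 0)  # True: strong gradients in both directions
print(harris_response(edge) < 0)  # True: gradient in one direction only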
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase__ = {ord(char) for char in VALID_CHARS}
lowerCAmelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = ""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for keychar, cipherchar in zip(cycle(__a ), __a ):
SCREAMING_SNAKE_CASE_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__a )
return decoded
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = []
for key in product(__a, repeat=3 ):
SCREAMING_SNAKE_CASE_ = try_key(__a, __a )
if encoded is not None:
possibles.append(__a )
return possibles
def _lowerCamelCase ( __a, __a ):
return [possible for possible in possibles if common_word in possible.lower()]
def _lowerCamelCase ( __a = "p059_cipher.txt" ):
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = Path(__a ).parent.joinpath(__a ).read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE_ = [int(__a ) for number in data.strip().split(''',''' )]
SCREAMING_SNAKE_CASE_ = filter_valid_chars(__a )
for common_word in COMMON_WORDS:
SCREAMING_SNAKE_CASE_ = filter_common_word(__a, __a )
if len(__a ) == 1:
break
SCREAMING_SNAKE_CASE_ = possibles[0]
return sum(ord(__a ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''') | 628 |
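# Editor's note: a minimal round-trip sketch of the XOR scheme brute-forced
# above: the plaintext is XORed with a cycled lowercase key, and XORing the
# ciphertext with the same key recovers it. The key b"abc" and the message
# are assumptions for illustration, not values from the puzzle data.
from itertools import cycle

def xor_with_key(data: bytes, key: bytes) -> bytes:
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

cipher = xor_with_key(b"the quick brown fox", b"abc")
assert xor_with_key(cipher, b"abc") == b"the quick brown fox"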
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger('transformers.models.speecht5')
def _lowerCamelCase ( __a, __a, __a ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_ = checkpoint['''input_conv.weight_g''']
SCREAMING_SNAKE_CASE_ = checkpoint['''input_conv.weight_v''']
SCREAMING_SNAKE_CASE_ = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_ = checkpoint[F'upsamples.{i}.1.weight_g']
SCREAMING_SNAKE_CASE_ = checkpoint[F'upsamples.{i}.1.weight_v']
SCREAMING_SNAKE_CASE_ = checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
SCREAMING_SNAKE_CASE_ = checkpoint['''output_conv.1.weight_g''']
SCREAMING_SNAKE_CASE_ = checkpoint['''output_conv.1.weight_v''']
SCREAMING_SNAKE_CASE_ = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _lowerCamelCase ( __a, __a, __a, __a=None, __a=None, ):
if config_path is not None:
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGanConfig.from_pretrained(__a )
else:
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(__a )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
load_weights(orig_checkpoint['''model''']['''generator'''], __a, __a )
SCREAMING_SNAKE_CASE_ = np.load(__a )
SCREAMING_SNAKE_CASE_ = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_ = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a ).float()
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a ).float()
model.save_pretrained(__a )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowerCAmelCase__ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 628 |
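# Editor's note: a minimal sketch of why the checkpoint above stores
# `weight_g`/`weight_v` pairs: applying PyTorch weight normalization splits a
# layer's weight into magnitude and direction parameters, and removing it
# folds them back into a plain `weight`. The Conv1d shape is an assumption.
import torch
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(torch.nn.Conv1d(1, 1, 3))
print(hasattr(conv, 'weight_g'), hasattr(conv, 'weight_v'))  # True True
remove_weight_norm(conv)
print(hasattr(conv, 'weight_g'))  # False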
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return x.sum()
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return i + 1
@dataclass
class snake_case :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = 2
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2}
SCREAMING_SNAKE_CASE_ = {
'''a''': np.eye(2 ).astype(SCREAMING_SNAKE_CASE_ ),
'''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ),
'''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ),
}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): # can't pickle a local lambda
map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 3, '''b''': 4}
SCREAMING_SNAKE_CASE_ = {'''a''': 5, '''b''': 6}
SCREAMING_SNAKE_CASE_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
class snake_case :
UpperCAmelCase__ = '''bar'''
SCREAMING_SNAKE_CASE_ = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(SCREAMING_SNAKE_CASE_ , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowerCamelCase ( __a, __a, __a ):
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(__a )}
SCREAMING_SNAKE_CASE_ = map_nested(lambda __a : x + 10, __a, num_proc=__a, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class snake_case ( __lowercase ):
@require_tf
def _lowercase (self ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
SCREAMING_SNAKE_CASE_ = layers.Dense(2 )
def gen_random_output():
SCREAMING_SNAKE_CASE_ = tf.random.uniform((1, 3) )
return model(SCREAMING_SNAKE_CASE_ ).numpy()
with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _lowercase (self ):
"""simple docstring"""
import torch
def gen_random_output():
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(3 , 2 )
SCREAMING_SNAKE_CASE_ = torch.rand(1 , 3 )
return model(SCREAMING_SNAKE_CASE_ ).detach().numpy()
with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _lowercase (self ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''', [{}] )
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).flatten()
assert output == expected_output
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = A(x=1, y='''foobar''' )
SCREAMING_SNAKE_CASE_ = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(__a ) == expected_output
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(__a ) == expected_output
with pytest.raises(__a ):
asdict([1, A(x=10, y='''foo''' )] )
def _lowerCamelCase ( __a ):
return text.split()
def _lowerCamelCase ( __a ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowerCamelCase ( ):
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__a ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__a ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = []
for yield_time, content in iflatmap_unordered(
__a, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
out.append(__a )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(__a ) == 4 | 628 | 1 |
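# Editor's note: a toy sketch of the recursive mapping behaviour the
# `map_nested` tests above exercise; this simplified version (no
# multiprocessing, no numpy handling) is an assumption, not the actual
# `datasets.utils.py_utils.map_nested` implementation.
def map_nested_simple(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_simple(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_simple(fn, v) for v in data)
    return fn(data)

assert map_nested_simple(lambda x: x + 1, {'a': [1, 2], 'b': {'c': 3}}) == {'a': [2, 3], 'b': {'c': 4}}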
"""simple docstring"""
def _lowerCamelCase ( __a ):
# bit count represents no. of bits in the gray code
if bit_count < 0:
raise ValueError('''The given input must not be negative''' )
# get the generated string sequence
SCREAMING_SNAKE_CASE_ = gray_code_sequence_string(__a )
# convert them to integers
for i in range(len(__a ) ):
SCREAMING_SNAKE_CASE_ = int(sequence[i], 2 )
return sequence
def _lowerCamelCase ( __a ):
# The approach is a recursive one
# Base case achieved when either n = 0 or n = 1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
SCREAMING_SNAKE_CASE_ = 1 << bit_count # defines the length of the sequence
# 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
SCREAMING_SNAKE_CASE_ = gray_code_sequence_string(bit_count - 1 )
SCREAMING_SNAKE_CASE_ = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
SCREAMING_SNAKE_CASE_ = '''0''' + smaller_sequence[i]
sequence.append(__a )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
SCREAMING_SNAKE_CASE_ = '''1''' + smaller_sequence[i]
sequence.append(__a )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 |
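# Editor's note: the reflect-and-prefix construction above can be
# cross-checked against the closed form g(n) = n ^ (n >> 1); this check is
# an editorial addition, not part of the original module.
def gray_code_closed_form(bit_count: int) -> list[int]:
    return [n ^ (n >> 1) for n in range(1 << bit_count)]

assert gray_code_closed_form(3) == [0, 1, 3, 2, 6, 7, 5, 4]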
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
_import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE_ = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE_ = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE_ = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE_ = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE_ = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE_ = 352
# set label information
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE_ = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = FocalNetConfig(
embed_dim=__a, depths=__a, focal_levels=__a, focal_windows=__a, use_conv_embed=__a, idalabel=__a, labelaid=__a, use_post_layernorm=__a, use_layerscale=__a, )
return config
def _lowerCamelCase ( __a ):
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE_ = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''encoder.layers''', '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''downsample.proj''', '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.f''', '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.h''', '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.proj''', '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE_ = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE_ = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head''', '''classifier''' )
else:
SCREAMING_SNAKE_CASE_ = '''focalnet.''' + name
return name
def _lowerCamelCase ( __a, __a, __a=False ):
# fmt: off
SCREAMING_SNAKE_CASE_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''', __a )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__a, map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = get_focalnet_config(__a )
SCREAMING_SNAKE_CASE_ = FocalNetForImageClassification(__a )
model.eval()
# load state dict
model.load_state_dict(__a )
# verify conversion
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__a, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=__a, crop_size=224, do_normalize=__a, image_mean=__a, image_std=__a, )
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
SCREAMING_SNAKE_CASE_ = processor(images=__a, return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
SCREAMING_SNAKE_CASE_ = image_transforms(__a ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values, __a, atol=1E-4 )
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''', outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 1 |
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''t5'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__(self , SCREAMING_SNAKE_CASE_=3_21_28 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=20_48 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=1_28 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = d_kv
SCREAMING_SNAKE_CASE_ = d_ff
SCREAMING_SNAKE_CASE_ = num_layers
SCREAMING_SNAKE_CASE_ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = relative_attention_num_buckets
SCREAMING_SNAKE_CASE_ = relative_attention_max_distance
SCREAMING_SNAKE_CASE_ = dropout_rate
SCREAMING_SNAKE_CASE_ = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ = initializer_factor
SCREAMING_SNAKE_CASE_ = feed_forward_proj
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = self.feed_forward_proj.split('''-''' )
SCREAMING_SNAKE_CASE_ = act_info[-1]
SCREAMING_SNAKE_CASE_ = act_info[0] == '''gated'''
if len(SCREAMING_SNAKE_CASE_ ) > 1 and act_info[0] != "gated" or len(SCREAMING_SNAKE_CASE_ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE_ = '''gelu_new'''
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class snake_case ( __lowercase ):
@property
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
SCREAMING_SNAKE_CASE_ = '''past_encoder_sequence + sequence'''
SCREAMING_SNAKE_CASE_ = {0: '''batch'''}
SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' )
return common_inputs
@property
def _lowercase (self ):
"""simple docstring"""
return 13 | 628 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 628 | 1 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def _lowerCamelCase ( __a ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 | 1 |
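# Shape-level sketch of the `has_pre_transformation` branch above: the
# second-to-last hidden state is layer-normed and then linearly projected,
# instead of projecting the final layer output. All names below are
# hypothetical stand-ins for the modules defined in the class.
import torch
from torch import nn

hidden_size, project_dim, seq_len = 768, 512, 16
penultimate = torch.randn(1, seq_len, hidden_size)  # outputs["hidden_states"][-2]
pre_ln = nn.LayerNorm(hidden_size, eps=1e-5)        # the pre-transformation LayerNorm
projection = nn.Linear(hidden_size, project_dim)    # the pre-transformation projection

projection_state = projection(pre_ln(penultimate))
assert projection_state.shape == (1, seq_len, project_dim)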
"""simple docstring"""
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = [0 for i in range(len(__a ) )]
# initialize interval's left pointer and right pointer
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 0, 0
for i in range(1, len(__a ) ):
# case when current index is inside the interval
if i <= right_pointer:
SCREAMING_SNAKE_CASE_ = min(right_pointer - i + 1, z_result[i - left_pointer] )
SCREAMING_SNAKE_CASE_ = min_edge
while go_next(__a, __a, __a ):
z_result[i] += 1
# if the new index's result extends the interval further to the right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = i, i + z_result[i] - 1
return z_result
def _lowerCamelCase ( __a, __a, __a ):
return i + z_result[i] < len(__a ) and s[z_result[i]] == s[i + z_result[i]]
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
SCREAMING_SNAKE_CASE_ = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index is the starting position of a substring
# equal to the pattern string
if val >= len(__a ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 |
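# Readably-named sketch of the Z-function pattern count above; the names
# z_function / count_occurrences are illustrative, not taken from the sample.
def z_function(s: str) -> list[int]:
    # z[i] = length of the longest prefix of s that also starts at index i
    z = [0] * len(s)
    left = right = 0
    for i in range(1, len(s)):
        if i <= right:  # reuse what the current Z-box already tells us
            z[i] = min(right - i + 1, z[i - left])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > right:  # the new match extends the Z-box
            left, right = i, i + z[i] - 1
    return z

def count_occurrences(pattern: str, text: str) -> int:
    # every index with z >= len(pattern) starts a full match; inserting a
    # separator (pattern + "\x00" + text) guards against matches that would
    # straddle the pattern/text boundary
    z = z_function(pattern + "\x00" + text)
    return sum(1 for v in z if v >= len(pattern))

assert count_occurrences("aba", "abacaba") == 2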
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase ( __a, __a=0.9_9_9, __a="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ = []
for i in range(__a ):
SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ), __a ) )
return torch.tensor(__a, dtype=torch.floataa )
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps | 628 | 1 |
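# Standalone sketch of the noise schedule built by the Karras conversion above
# (Karras et al. 2022): interpolate between sigma_max and sigma_min in
# 1/rho-space, then raise back to the rho-th power.
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, n: int, rho: float = 7.0) -> np.ndarray:
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

sigmas = karras_sigmas(0.03, 14.6, n=10)
assert np.isclose(sigmas[0], 14.6) and np.isclose(sigmas[-1], 0.03)
assert np.all(np.diff(sigmas) < 0)  # the schedule decreases monotonically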
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ''
lowerCAmelCase__ = ''
lowerCAmelCase__ = ''
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = get_dataset(__a, __a )
print('''Processing...''' )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = update_image_and_anno(__a, __a, __a )
for index, image in enumerate(__a ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
SCREAMING_SNAKE_CASE_ = random_chars(32 )
SCREAMING_SNAKE_CASE_ = paths[index].split(os.sep )[-1].rsplit('''.''', 1 )[0]
SCREAMING_SNAKE_CASE_ = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
cva.imwrite(F'/{file_root}.jpg', __a, [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'Success {index+1}/{len(__a )} with {file_name}' )
SCREAMING_SNAKE_CASE_ = []
for anno in new_annos[index]:
SCREAMING_SNAKE_CASE_ = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
annos_list.append(__a )
with open(F'/{file_root}.txt', '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for label_file in glob.glob(os.path.join(__a, '''*.txt''' ) ):
SCREAMING_SNAKE_CASE_ = label_file.split(os.sep )[-1].rsplit('''.''', 1 )[0]
with open(__a ) as in_file:
SCREAMING_SNAKE_CASE_ = in_file.readlines()
SCREAMING_SNAKE_CASE_ = os.path.join(__a, F'{label_name}.jpg' )
SCREAMING_SNAKE_CASE_ = []
for obj_list in obj_lists:
SCREAMING_SNAKE_CASE_ = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__a )
labels.append(__a )
return img_paths, labels
def _lowerCamelCase ( __a, __a, __a = 1 ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for idx in range(len(__a ) ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img_list[idx]
path_list.append(__a )
SCREAMING_SNAKE_CASE_ = anno_list[idx]
SCREAMING_SNAKE_CASE_ = cva.imread(__a )
if flip_type == 1:
SCREAMING_SNAKE_CASE_ = cva.flip(__a, __a )
for bbox in img_annos:
SCREAMING_SNAKE_CASE_ = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
SCREAMING_SNAKE_CASE_ = cva.flip(__a, __a )
for bbox in img_annos:
SCREAMING_SNAKE_CASE_ = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__a )
new_imgs_list.append(__a )
return new_imgs_list, new_annos_lists, path_list
def _lowerCamelCase ( __a = 32 ):
assert number_char > 1, "The number of characters should be greater than 1"
SCREAMING_SNAKE_CASE_ = ascii_lowercase + digits
return "".join(random.choice(__a ) for _ in range(__a ) )
if __name__ == "__main__":
main()
print('DONE ✅') | 628 |
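# Tiny standalone check of the bounding-box arithmetic used above. YOLO
# coordinates are normalized to [0, 1], so a horizontal flip only needs
# x_center -> 1 - x_center (vertical: y_center -> 1 - y_center); widths and
# heights are unchanged. The helper name is hypothetical.
def flip_bbox_horizontal(bbox: list[float]) -> list[float]:
    class_id, x_center, y_center, width, height = bbox
    return [class_id, 1 - x_center, y_center, width, height]

box = [0, 0.25, 0.40, 0.10, 0.20]
flipped = flip_bbox_horizontal(box)
assert flipped == [0, 0.75, 0.40, 0.10, 0.20]
assert flip_bbox_horizontal(flipped) == box  # flipping twice is the identity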
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary. | 628 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(torch, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(value, dict ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 628 |
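# Minimal sketch of the recursive deep-merge performed by the merge helper
# above: nested dicts are merged key-by-key, and scalar values from `source`
# overwrite those already in `destination`.
def deep_merge(source: dict, destination: dict) -> dict:
    for key, value in source.items():
        if isinstance(value, dict):
            deep_merge(value, destination.setdefault(key, {}))
        else:
            destination[key] = value
    return destination

base = {"train": {"lr": 1e-3, "epochs": 3}, "seed": 0}
override = {"train": {"lr": 5e-4}}
assert deep_merge(override, base) == {"train": {"lr": 5e-4, "epochs": 3}, "seed": 0}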
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 628 | 1 |
"""simple docstring"""
def _lowerCamelCase ( __a, __a, __a ):
if exponent == 1:
return base
if exponent % 2 == 0:
SCREAMING_SNAKE_CASE_ = _modexpt(__a, exponent // 2, __a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a, exponent - 1, __a )) % modulo_value
def _lowerCamelCase ( __a = 1_777, __a = 1_855, __a = 8 ):
SCREAMING_SNAKE_CASE_ = base
for _ in range(1, __a ):
SCREAMING_SNAKE_CASE_ = _modexpt(__a, __a, 10**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''') | 628 |
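# Cross-check sketch for the routine above: the modular-exponentiation helper
# is square-and-multiply, which Python's built-in three-argument pow() also
# implements, so the truncated tetration can be written as:
def tetration_mod(base: int, height: int, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)
    return result

assert tetration_mod(3, 2) == pow(3, 3, 10**8) == 27
assert tetration_mod(2, 3) == 2**2**2 == 16  # base 2, height 3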
"""simple docstring"""
def _lowerCamelCase ( __a ):
if not isinstance(number, int ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = 1
for i in range(1, __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 1 |
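# The loop above implements the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1)
# with C(1) = 1, i.e. it returns the (number-1)-th Catalan number in 0-indexed
# terms; every intermediate division is exact, which makes floor division safe.
# Cross-check against the closed form C(k) = binom(2k, k) / (k + 1):
from math import comb

def catalan_closed_form(k: int) -> int:
    return comb(2 * k, k) // (k + 1)

assert [catalan_closed_form(k) for k in range(6)] == [1, 1, 2, 5, 14, 42]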
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _lowerCamelCase ( __a, __a, __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = int(np.ceil((x_end - xa) / step_size ) )
SCREAMING_SNAKE_CASE_ = np.zeros((n + 1,) )
SCREAMING_SNAKE_CASE_ = ya
SCREAMING_SNAKE_CASE_ = xa
for k in range(__a ):
SCREAMING_SNAKE_CASE_ = y[k] + step_size * ode_func(__a, y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 |
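# Usage sketch for the explicit (forward) Euler step above,
# y_{k+1} = y_k + h * f(x_k, y_k), on y' = y with y(0) = 1, whose exact
# solution is e^x; the global error shrinks roughly linearly in the step size.
import math
import numpy as np

def explicit_euler(ode_func, ya, xa, x_end, step_size):
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros(n + 1)
    y[0], x = ya, xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

approx = explicit_euler(lambda x, y: y, ya=1.0, xa=0.0, x_end=1.0, step_size=1e-3)
print(approx[-1], math.e)  # ~2.7169 vs 2.71828...; halving the step roughly halves the error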
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__a ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE_ = i + 1
else:
SCREAMING_SNAKE_CASE_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''') | 628 | 1 |
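# The scan above relies on `nums` being sorted ascending: advancing i raises
# the sum, retreating j lowers it. Sketch of the usual extension to unsorted
# input, sorting index positions by value and mapping back (name hypothetical):
def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    order = sorted(range(len(nums)), key=nums.__getitem__)
    i, j = 0, len(nums) - 1
    while i < j:
        current = nums[order[i]] + nums[order[j]]
        if current == target:
            return sorted([order[i], order[j]])
        if current < target:
            i += 1
        else:
            j -= 1
    return []

assert two_sum_unsorted([11, 2, 15, 7], 9) == [1, 3]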
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _lowercase (self , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.random.RandomState(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.6_50_72, 0.5_84_92, 0.4_82_19, 0.5_55_21, 0.5_31_80, 0.5_59_39, 0.5_06_97, 0.3_98_00, 0.4_64_55] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.6_58_63, 0.5_94_25, 0.4_93_26, 0.5_63_13, 0.5_38_75, 0.5_66_27, 0.5_10_65, 0.3_97_77, 0.4_63_30] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.5_37_55, 0.6_07_86, 0.4_74_02, 0.4_94_88, 0.5_18_69, 0.4_98_19, 0.4_79_85, 0.3_89_57, 0.4_42_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.5_38_17, 0.6_08_12, 0.4_73_84, 0.4_95_30, 0.5_18_94, 0.4_98_14, 0.4_79_84, 0.3_89_58, 0.4_42_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
SCREAMING_SNAKE_CASE_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ ).images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.5_38_95, 0.6_08_08, 0.4_79_33, 0.4_96_08, 0.5_18_86, 0.4_99_50, 0.4_80_53, 0.3_89_57, 0.4_42_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = 3 * [inputs['''prompt''']]
# forward
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = output.images[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('''prompt''' )]
SCREAMING_SNAKE_CASE_ = pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' , )
SCREAMING_SNAKE_CASE_ = text_inputs['''input_ids''']
        SCREAMING_SNAKE_CASE_ = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]  # the exported CLIP text encoder takes int32 token ids
SCREAMING_SNAKE_CASE_ = prompt_embeds
# forward
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = 3 * ['''this is a negative prompt''']
SCREAMING_SNAKE_CASE_ = negative_prompt
SCREAMING_SNAKE_CASE_ = 3 * [inputs['''prompt''']]
# forward
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = output.images[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE_ = 3 * [inputs.pop('''prompt''' )]
SCREAMING_SNAKE_CASE_ = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE_ = pipe.tokenizer(
SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' , )
SCREAMING_SNAKE_CASE_ = text_inputs['''input_ids''']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = embeds
# forward
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
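# Nightly GPU suite: runs the full ONNX Stable Diffusion pipeline on onnxruntime's CUDA provider and compares output slices against known values.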
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case ( unittest.TestCase ):
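    # Shared onnxruntime settings for the nightly tests: the CUDA provider is passed as a (name, options) tuple capping the memory arena at 15 GB.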
@property
def _lowercase (self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ort.SessionOptions()
        SCREAMING_SNAKE_CASE_ = False  # upstream this sets options.enable_mem_pattern = False
return options
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
SCREAMING_SNAKE_CASE_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.04_52, 0.03_90, 0.00_87, 0.03_50, 0.06_17, 0.03_64, 0.05_44, 0.05_23, 0.07_20] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''open neural network exchange'''
SCREAMING_SNAKE_CASE_ = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.28_67, 0.19_74, 0.14_81, 0.72_94, 0.72_51, 0.66_67, 0.41_94, 0.56_42, 0.64_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''open neural network exchange'''
SCREAMING_SNAKE_CASE_ = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.23_06, 0.19_59, 0.15_93, 0.65_49, 0.63_94, 0.54_08, 0.50_65, 0.60_10, 0.61_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0
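        # The callback fires once per scheduler step; latents are spot-checked at steps 0 and 5.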
def test_callback_fn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
SCREAMING_SNAKE_CASE_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE_ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = np.array(
[-0.67_72, -0.38_35, -1.24_56, 0.19_05, -1.09_74, 0.69_67, -1.93_53, 0.01_78, 1.01_67] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE_ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = np.array(
[-0.33_51, 0.22_41, -0.18_37, -0.23_25, -0.65_77, 0.33_93, -0.02_41, 0.58_99, 1.38_75] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Andromeda galaxy in a bottle'''
SCREAMING_SNAKE_CASE_ = np.random.RandomState(0 )
pipe(
prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE_ , callback=SCREAMING_SNAKE_CASE_ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_ = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_ = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None | 628 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
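# Builds small train/valid DataLoaders over noisy samples of the line y = a * x + b.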
def _lowerCamelCase ( __a=2, __a=3, __a=16, __a = 10, __a = 2 ):
def get_dataset(__a ):
SCREAMING_SNAKE_CASE_ = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__a, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
return (train_dataloader, valid_dataloader)
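# Runs a few epochs of MSE training and returns the random numbers drawn at each step (used to verify RNG state survives checkpoint save/load).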
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=None ):
SCREAMING_SNAKE_CASE_ = []
for epoch in range(__a ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = batch
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(__a, __a )
accelerator.backward(__a )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
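# Minimal scalar model y = a * x + b whose two parameters are easy to compare before and after checkpoint restore.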
class snake_case ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints should survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
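    # Relaunches this file under torchrun to exercise the multi-process checkpointing path in __main__ below.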
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 628 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''num_attention_heads''' ) )
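# Builds Levit configs and dummy pixel inputs shared by the model tests below.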
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1_28, 2_56, 3_84] , SCREAMING_SNAKE_CASE_=[4, 6, 8] , SCREAMING_SNAKE_CASE_=[2, 3, 4] , SCREAMING_SNAKE_CASE_=[16, 16, 16] , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=2 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = kernel_size
SCREAMING_SNAKE_CASE_ = stride
SCREAMING_SNAKE_CASE_ = padding
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = key_dim
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = attention_ratio
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = initializer_range
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def _lowercase (self ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LevitModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = (self.image_size, self.image_size)
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = LevitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( __lowercase , __lowercase , unittest.TestCase ):
UpperCAmelCase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LevitModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def _lowercase (self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase (self ):
"""simple docstring"""
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = outputs.hidden_states
SCREAMING_SNAKE_CASE_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE_ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE_ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
SCREAMING_SNAKE_CASE_ = problem_type['''title''']
SCREAMING_SNAKE_CASE_ = problem_type['''num_labels''']
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE_ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE_ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as warning_list:
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def _lowercase (self ):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = LevitModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def _lowercase (self ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.default_image_processor
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
SCREAMING_SNAKE_CASE_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) | 628 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
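# Covers DDIMParallelScheduler: config sweeps, per-step variance values, and batched parallel denoising via batch_step_no_noise.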
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
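        # Three shifted copies of the deterministic sample are stacked so one batched call can denoise them in parallel.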
        SCREAMING_SNAKE_CASE_ = sample1.shape[0]
        SCREAMING_SNAKE_CASE_ = torch.stack([sample1, sample2, sample3] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 1 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return x.sum()
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return i + 1
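# Tiny dataclass consumed by the asdict tests below; the 42 placeholders stand in for its two field annotations (x and y).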
@dataclass
class snake_case :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = 2
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2}
SCREAMING_SNAKE_CASE_ = {
'''a''': np.eye(2 ).astype(SCREAMING_SNAKE_CASE_ ),
'''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ),
'''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ),
}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): # can't pickle a local lambda
map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 3, '''b''': 4}
SCREAMING_SNAKE_CASE_ = {'''a''': 5, '''b''': 6}
SCREAMING_SNAKE_CASE_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
class snake_case :
UpperCAmelCase__ = '''bar'''
SCREAMING_SNAKE_CASE_ = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(SCREAMING_SNAKE_CASE_ , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowerCamelCase ( __a, __a, __a ):
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(__a )}
SCREAMING_SNAKE_CASE_ = map_nested(lambda __a : x + 10, __a, num_proc=__a, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class snake_case ( __lowercase ):
@require_tf
def _lowercase (self ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
SCREAMING_SNAKE_CASE_ = layers.Dense(2 )
def gen_random_output():
SCREAMING_SNAKE_CASE_ = tf.random.uniform((1, 3) )
return model(SCREAMING_SNAKE_CASE_ ).numpy()
with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def _lowercase (self ):
"""simple docstring"""
import torch
def gen_random_output():
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(3 , 2 )
SCREAMING_SNAKE_CASE_ = torch.rand(1 , 3 )
return model(SCREAMING_SNAKE_CASE_ ).detach().numpy()
with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def _lowercase (self ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
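# A minimal usage sketch (illustrative, not part of the suite): temp_seed scopes
# RNG determinism to a block without leaking seed state afterwards.
#
#   with temp_seed(123):
#       first = np.random.rand(4)
#   with temp_seed(123):
#       second = np.random.rand(4)
#   np.testing.assert_equal(first, second)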
@pytest.mark.parametrize('''input_data''', [{}] )
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).flatten()
assert output == expected_output
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = A(x=1, y='''foobar''' )
SCREAMING_SNAKE_CASE_ = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(__a ) == expected_output
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(__a ) == expected_output
with pytest.raises(__a ):
asdict([1, A(x=10, y='''foo''' )] )
def _lowerCamelCase ( __a ):
return text.split()
def _lowerCamelCase ( __a ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowerCamelCase ( ):
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__a ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__a ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = []
for yield_time, content in iflatmap_unordered(
            __a, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(__a )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(__a ) == 4 | 628 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = TransfoXLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' ) | 628 | 1 |
"""simple docstring"""
import baseaa
def _lowerCamelCase ( __a ):
return baseaa.baaencode(string.encode('''utf-8''' ) )
def _lowerCamelCase ( __a ):
return baseaa.baadecode(__a ).decode('''utf-8''' )
if __name__ == "__main__":
lowerCAmelCase__ = 'Hello World!'
lowerCAmelCase__ = baseaa_encode(test)
print(encoded)
lowerCAmelCase__ = baseaa_decode(encoded)
print(decoded) | 628 |
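# A self-contained sketch of the same round trip with the standard base64
# module (assuming that is what the helpers above wrap; the names below are
# illustrative).
import base64


def base64_encode(string: str) -> bytes:
    # str -> UTF-8 bytes -> base64-encoded bytes
    return base64.b64encode(string.encode('utf-8'))


def base64_decode(encoded: bytes) -> str:
    # base64-encoded bytes -> raw bytes -> str
    return base64.b64decode(encoded).decode('utf-8')


assert base64_decode(base64_encode('Hello World!')) == 'Hello World!'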
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
        for perpendicular in range(base, max_perimeter + 1 ):
            SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 628 | 1 |
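# A self-contained sanity check of the counting logic above (a sketch with
# hand-verifiable values): perimeter 12 admits exactly one integer right
# triangle, (3, 4, 5), while perimeter 120 admits three: (30, 40, 50),
# (20, 48, 52) and (24, 45, 51).
from collections import Counter


def count_triplets_by_perimeter(max_perimeter: int) -> Counter:
    counts = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter <= max_perimeter:
                    counts[perimeter] += 1
    return counts


_counts = count_triplets_by_perimeter(120)
assert _counts[12] == 1
assert _counts[120] == 3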
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowerCAmelCase__ = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None) | 628 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 628 | 1 |
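# A self-contained sketch of the recursive dictionary merge implemented
# above: nested dicts are merged key by key, while scalar values from the
# source overwrite the destination in place.
def merge_dicts_sketch(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts_sketch(value, node)
        else:
            destination[key] = value
    return destination


merged = merge_dicts_sketch({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'b': 0})
assert merged == {'a': {'x': 1, 'y': 3}, 'b': 2}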
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase__ = logging.getLogger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''masked_bert'''
def __init__(self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-1_2 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="topK" , SCREAMING_SNAKE_CASE_="constant" , SCREAMING_SNAKE_CASE_=0.0 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = pruning_method
SCREAMING_SNAKE_CASE_ = mask_init
SCREAMING_SNAKE_CASE_ = mask_scale | 628 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
"""simple docstring"""
# Imports
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
self.set_matricies(red=SCREAMING_SNAKE_CASE_ , green=SCREAMING_SNAKE_CASE_ , blue=SCREAMING_SNAKE_CASE_ , red_edge=SCREAMING_SNAKE_CASE_ , nir=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if red is not None:
SCREAMING_SNAKE_CASE_ = red
if green is not None:
SCREAMING_SNAKE_CASE_ = green
if blue is not None:
SCREAMING_SNAKE_CASE_ = blue
if red_edge is not None:
SCREAMING_SNAKE_CASE_ = red_edge
if nir is not None:
SCREAMING_SNAKE_CASE_ = nir
return True
def _lowercase (self , SCREAMING_SNAKE_CASE_="" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
self.set_matricies(red=SCREAMING_SNAKE_CASE_ , green=SCREAMING_SNAKE_CASE_ , blue=SCREAMING_SNAKE_CASE_ , red_edge=SCREAMING_SNAKE_CASE_ , nir=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def _lowercase (self ):
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def _lowercase (self ):
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _lowercase (self ):
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def _lowercase (self ):
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def _lowercase (self ):
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _lowercase (self ):
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _lowercase (self , SCREAMING_SNAKE_CASE_=0.08 , SCREAMING_SNAKE_CASE_=1.22 , SCREAMING_SNAKE_CASE_=0.03 ):
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _lowercase (self ):
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _lowercase (self ):
"""simple docstring"""
return (self.nir / self.green) - 1
def _lowercase (self ):
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def _lowercase (self ):
"""simple docstring"""
return (self.red - self.blue) / self.red
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _lowercase (self ):
"""simple docstring"""
return self.nir - self.green
def _lowercase (self ):
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def _lowercase (self , SCREAMING_SNAKE_CASE_=0.16 ):
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def _lowercase (self , SCREAMING_SNAKE_CASE_=0.5 ):
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _lowercase (self ):
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def _lowercase (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def _lowercase (self ):
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _lowercase (self ):
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def _lowercase (self ):
"""simple docstring"""
return self.nir / self.red
def _lowercase (self ):
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def _lowercase (self ):
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _lowercase (self ):
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def _lowercase (self ):
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def _lowercase (self ):
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def _lowercase (self ):
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def _lowercase (self ):
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
SCREAMING_SNAKE_CASE_ = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _lowercase (self ):
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _lowercase (self ):
"""simple docstring"""
return self.nir / self.red
def _lowercase (self ):
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def _lowercase (self ):
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 628 |
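# A self-contained usage sketch for the most common of the indices above,
# NDVI = (nir - red) / (nir + red), on small synthetic reflectance arrays
# (the values are made up for illustration).
import numpy as np

red_band = np.array([[0.1, 0.2], [0.3, 0.4]])
nir_band = np.array([[0.5, 0.6], [0.7, 0.8]])
ndvi = (nir_band - red_band) / (nir_band + red_band)
# dense vegetation reflects far more NIR than red, pushing NDVI towards 1
assert ndvi.shape == (2, 2)
assert np.allclose(ndvi[0, 0], (0.5 - 0.1) / (0.5 + 0.1))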
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = BigBirdConfig.from_json_file(__a )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE_ = BigBirdForQuestionAnswering(__a )
else:
SCREAMING_SNAKE_CASE_ = BigBirdForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__a, __a, is_trivia_qa=__a )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 628 | 1 |
"""simple docstring"""
def _lowerCamelCase ( __a, __a, __a ):
    if principal <= 0:
        raise ValueError('''Principal borrowed must be > 0''' )
    if rate_per_annum < 0:
        raise ValueError('''Rate of interest must be >= 0''' )
    if years_to_repay <= 0 or not isinstance(years_to_repay, int ):
        raise ValueError('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
SCREAMING_SNAKE_CASE_ = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
SCREAMING_SNAKE_CASE_ = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 |
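# A worked example of the amortised-payment formula above,
# EMI = P * r * (1 + r)**n / ((1 + r)**n - 1),
# with illustrative numbers (not from the original): 25_000 borrowed at 8%
# per annum over 10 years gives r = 0.08 / 12 per month and n = 120
# payments, i.e. roughly 303 per month.
principal = 25_000
monthly_rate = 0.08 / 12
payments = 10 * 12
emi = (
    principal
    * monthly_rate
    * (1 + monthly_rate) ** payments
    / ((1 + monthly_rate) ** payments - 1)
)
assert 300 < emi < 306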
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''', __a, )
if isinstance(__a, torch.Tensor ):
return image
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(__a ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return image
def _lowerCamelCase ( __a ):
if isinstance(__a, torch.Tensor ):
return mask
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = mask[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(mask[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
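# A self-contained sketch of the two preprocessing steps above (illustrative
# values only): pixel intensities in [0, 255] are rescaled to [-1, 1] before
# entering the diffusion model, and mask values are binarised at 0.5.
import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
scaled = 2.0 * (pixels / 255.0) - 1.0
assert np.allclose(scaled, [-1.0, 0.0, 1.0])

mask = np.array([10.0, 200.0], dtype=np.float32) / 255.0
binary = np.where(mask < 0.5, 0.0, 1.0)
assert binary.tolist() == [0.0, 1.0]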
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = 'Muhammad Umer Farooq'
lowerCAmelCase__ = 'MIT'
lowerCAmelCase__ = '1.0.0'
lowerCAmelCase__ = 'Muhammad Umer Farooq'
lowerCAmelCase__ = 'contact@muhammadumerfarooq.me'
lowerCAmelCase__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = domain
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor a bare "#" fragment, record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
SCREAMING_SNAKE_CASE_ = parse.urljoin(self.domain , SCREAMING_SNAKE_CASE_ )
self.urls.append(SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( __a ):
return ".".join(get_sub_domain_name(__a ).split('''.''' )[-2:] )
def _lowerCamelCase ( __a ):
return parse.urlparse(__a ).netloc
def _lowerCamelCase ( __a = "https://github.com" ):
SCREAMING_SNAKE_CASE_ = get_domain_name(__a )
# Initialize the parser
SCREAMING_SNAKE_CASE_ = Parser(__a )
try:
# Open URL
SCREAMING_SNAKE_CASE_ = requests.get(__a )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
SCREAMING_SNAKE_CASE_ = set()
for link in parser.urls:
            # Open each discovered link and scan its HTML for addresses.
try:
SCREAMING_SNAKE_CASE_ = requests.get(__a )
# Get the valid email.
SCREAMING_SNAKE_CASE_ = re.findall('''[a-zA-Z0-9]+@''' + domain, read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__a )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__a )
if __name__ == "__main__":
lowerCAmelCase__ = emails_from_url('https://github.com')
print(f'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails))) | 628 |
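# A self-contained sketch of the address-matching step above. The pattern
# only accepts an alphanumeric local part followed by '@' and a fixed
# domain, so it is a crude filter rather than a full RFC 5322 parser;
# re.escape is added here for safety, whereas the crawler above interpolates
# the domain directly.
import re

page_text = 'contact alice123@github.com or bob@github.com; ignore carol@example.org'
found = set(re.findall(r'[a-zA-Z0-9]+@' + re.escape('github.com'), page_text))
assert found == {'alice123@github.com', 'bob@github.com'}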
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ''' --overwrite_output_dir to overwrite it.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 628 | 1 |
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class snake_case ( __lowercase ):
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
requires_backends(self , '''decord''' )
self.check_model_type(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
if frame_sampling_rate is not None:
SCREAMING_SNAKE_CASE_ = frame_sampling_rate
if num_frames is not None:
SCREAMING_SNAKE_CASE_ = num_frames
SCREAMING_SNAKE_CASE_ = {}
if top_k is not None:
SCREAMING_SNAKE_CASE_ = top_k
return preprocess_params, {}, postprocess_params
def __call__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1 ):
"""simple docstring"""
if num_frames is None:
SCREAMING_SNAKE_CASE_ = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
SCREAMING_SNAKE_CASE_ = BytesIO(requests.get(SCREAMING_SNAKE_CASE_ ).content )
SCREAMING_SNAKE_CASE_ = VideoReader(SCREAMING_SNAKE_CASE_ )
videoreader.seek(0 )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = num_frames * frame_sampling_rate - 1
SCREAMING_SNAKE_CASE_ = np.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num=SCREAMING_SNAKE_CASE_ , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ = videoreader.get_batch(SCREAMING_SNAKE_CASE_ ).asnumpy()
SCREAMING_SNAKE_CASE_ = list(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework )
return model_inputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model(**SCREAMING_SNAKE_CASE_ )
return model_outputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE_ = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ = model_outputs.logits.softmax(-1 )[0]
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = probs.topk(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
SCREAMING_SNAKE_CASE_ = scores.tolist()
SCREAMING_SNAKE_CASE_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] | 628 |
"""simple docstring"""
import cva
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = np.gradient(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
        SCREAMING_SNAKE_CASE_ = self.k  # use the k validated in __init__ rather than a hard-coded 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
                # Threshold on the corner response r; raise it for fewer, stronger corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 628 | 1 |
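# A self-contained numpy-only sketch of the response computed above,
# r = det(M) - k * trace(M)**2 with M the 3x3-windowed structure tensor,
# on a tiny synthetic image (illustrative values only).
import numpy as np

img = np.zeros((9, 9), dtype=np.float64)
img[4:, 4:] = 1.0  # bright quadrant whose inner corner sits at (4, 4)
dy, dx = np.gradient(img)
ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
k, offset = 0.04, 1  # k from the validated {0.04, 0.06} set, 3x3 window
response = np.zeros_like(img)
for y in range(offset, img.shape[0] - offset):
    for x in range(offset, img.shape[1] - offset):
        wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        response[y, x] = (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2
# the corner outscores both an edge pixel and a flat-region pixel
assert response[4, 4] > response[4, 7]
assert response[4, 4] > response[1, 1]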
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
lowerCAmelCase__ = Path(__file__).parent / 'model_card_template.md'
lowerCAmelCase__ = uuida().hex
lowerCAmelCase__ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowerCamelCase ( __a = None ):
SCREAMING_SNAKE_CASE_ = F'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'; torch/{_torch_version}'
if is_flax_available():
ua += F'; jax/{_jax_version}'
ua += F'; flax/{_flax_version}'
if is_onnx_available():
ua += F'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a, __a ):
ua += "; " + "; ".join(F'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(__a, __a ):
ua += "; " + user_agent
return ua
def _lowerCamelCase ( __a, __a = None, __a = None ):
if token is None:
SCREAMING_SNAKE_CASE_ = HfFolder.get_token()
if organization is None:
SCREAMING_SNAKE_CASE_ = whoami(__a )['''name''']
return F'{username}/{model_id}'
else:
return F'{organization}/{model_id}'
def _lowerCamelCase ( __a, __a ):
if not is_jinja_available():
raise ValueError(
'''Modelcard rendering is based on Jinja templates.'''
''' Please make sure to have `jinja` installed before using `create_model_card`.'''
''' To install it, please run `pip install Jinja2`.''' )
if hasattr(__a, '''local_rank''' ) and args.local_rank not in [-1, 0]:
return
SCREAMING_SNAKE_CASE_ = args.hub_token if hasattr(__a, '''hub_token''' ) else None
SCREAMING_SNAKE_CASE_ = get_full_repo_name(__a, token=__a )
SCREAMING_SNAKE_CASE_ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=__a, model_name=__a, repo_name=__a, dataset_name=args.dataset_name if hasattr(__a, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__a, '''gradient_accumulation_steps''' ) else None
), adam_betaa=args.adam_betaa if hasattr(__a, '''adam_beta1''' ) else None, adam_betaa=args.adam_betaa if hasattr(__a, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(__a, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(__a, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(__a, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(__a, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(__a, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(__a, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(__a, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, )
SCREAMING_SNAKE_CASE_ = os.path.join(args.output_dir, '''README.md''' )
model_card.save(__a )
def _lowerCamelCase ( __a, __a = None ):
if resolved_file is None or commit_hash is not None:
return commit_hash
SCREAMING_SNAKE_CASE_ = str(Path(__a ).as_posix() )
SCREAMING_SNAKE_CASE_ = re.search(r'''snapshots/([^/]+)/''', __a )
if search is None:
return None
SCREAMING_SNAKE_CASE_ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(__a ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase__ = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
lowerCAmelCase__ = os.path.join(hf_cache_home, 'diffusers')
def _lowerCamelCase ( __a = None, __a = None ):
if new_cache_dir is None:
SCREAMING_SNAKE_CASE_ = DIFFUSERS_CACHE
if old_cache_dir is None:
SCREAMING_SNAKE_CASE_ = old_diffusers_cache
SCREAMING_SNAKE_CASE_ = Path(__a ).expanduser()
SCREAMING_SNAKE_CASE_ = Path(__a ).expanduser()
for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
SCREAMING_SNAKE_CASE_ = new_cache_dir / old_blob_path.relative_to(__a )
new_blob_path.parent.mkdir(parents=__a, exist_ok=__a )
os.replace(__a, __a )
try:
os.symlink(__a, __a )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase__ = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
lowerCAmelCase__ = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase__ = int(f.read())
except ValueError:
lowerCAmelCase__ = 0
if cache_version < 1:
lowerCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
lowerCAmelCase__ = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _lowerCamelCase ( __a, __a = None ):
if variant is not None:
SCREAMING_SNAKE_CASE_ = weights_name.split('''.''' )
SCREAMING_SNAKE_CASE_ = splits[:-1] + [variant] + splits[-1:]
SCREAMING_SNAKE_CASE_ = '''.'''.join(__a )
return weights_name
def _lowerCamelCase ( __a, *,
        __a, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a=None, ):
SCREAMING_SNAKE_CASE_ = str(__a )
if os.path.isfile(__a ):
return pretrained_model_name_or_path
elif os.path.isdir(__a ):
if os.path.isfile(os.path.join(__a, __a ) ):
# Load from a PyTorch checkpoint
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__a, __a, __a ) ):
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a, __a )
return model_file
else:
raise EnvironmentError(
F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__a ).base_version ) >= version.parse('''0.20.0''' )
):
try:
SCREAMING_SNAKE_CASE_ = hf_hub_download(
__a, filename=_add_variant(__a, __a ), cache_dir=__a, force_download=__a, proxies=__a, resume_download=__a, local_files_only=__a, use_auth_token=__a, user_agent=__a, subfolder=__a, revision=revision or commit_hash, )
warnings.warn(
F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.', __a, )
return model_file
except: # noqa: E722
warnings.warn(
F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__a, __a )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__a, __a )}\' so that the correct variant file can be added.', __a, )
try:
# 2. Load model file as usual
SCREAMING_SNAKE_CASE_ = hf_hub_download(
__a, filename=__a, cache_dir=__a, force_download=__a, proxies=__a, resume_download=__a, local_files_only=__a, use_auth_token=__a, user_agent=__a, subfolder=__a, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
'''this model name. Check the model page at '''
F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
F' directory containing a file named {weights_name} or'
            ''' \nCheck out your internet connection or see how to run the library in'''
            ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
F'containing a file named {weights_name}' ) | 628 |
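# A self-contained sketch of the variant-filename rule implemented above:
# the variant tag is inserted just before the file extension.
from typing import Optional


def add_variant_sketch(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is None:
        return weights_name
    splits = weights_name.split('.')
    return '.'.join(splits[:-1] + [variant] + splits[-1:])


assert add_variant_sketch('diffusion_pytorch_model.bin', 'fp16') == 'diffusion_pytorch_model.fp16.bin'
assert add_variant_sketch('model.safetensors') == 'model.safetensors'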
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 1 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
lowerCAmelCase__ = {'facebook/blenderbot_small-90M': 512}
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ = char
SCREAMING_SNAKE_CASE_ = set(__a )
return pairs
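# Illustrative example for the pair helper above (named get_pairs upstream, which
# is how the BPE loop below refers to it): the symbol tuple ('h', 'e', 'l', 'l', 'o')
# yields {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}, the set of adjacent pairs
# that gets ranked against bpe_ranks to choose the next merge.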
class snake_case ( __lowercase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="__start__" , SCREAMING_SNAKE_CASE_="__end__" , SCREAMING_SNAKE_CASE_="__unk__" , SCREAMING_SNAKE_CASE_="__null__" , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE_ = merges_handle.read().split('''\n''' )[1:-1]
SCREAMING_SNAKE_CASE_ = [tuple(merge.split() ) for merge in merges]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = {}
@property
def _lowercase (self ):
"""simple docstring"""
return len(self.encoder )
def _lowercase (self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE_ )
if "\n" in token:
SCREAMING_SNAKE_CASE_ = token.replace('''\n''' , ''' __newln__''' )
SCREAMING_SNAKE_CASE_ = token.split(''' ''' )
SCREAMING_SNAKE_CASE_ = []
for token in tokens:
if not len(SCREAMING_SNAKE_CASE_ ):
continue
SCREAMING_SNAKE_CASE_ = token.lower()
SCREAMING_SNAKE_CASE_ = tuple(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
words.append(SCREAMING_SNAKE_CASE_ )
continue
while True:
SCREAMING_SNAKE_CASE_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = bigram
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
SCREAMING_SNAKE_CASE_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ = tuple(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ = get_pairs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = word[:-4]
SCREAMING_SNAKE_CASE_ = word
words.append(SCREAMING_SNAKE_CASE_ )
return " ".join(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = token.lower()
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
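# Round-trip sketch under a hypothetical merge table: tokenizing 'hello' might
# yield ['he@@', 'llo'], and convert_tokens_to_string joins the pieces back to
# 'hello' by deleting every '@@ ' continuation marker.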
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
SCREAMING_SNAKE_CASE_ = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
SCREAMING_SNAKE_CASE_ = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file | 628 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return x.sum()
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return i + 1
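# These two helpers are deliberately defined at module level: the stdlib pickler
# used by multiprocessing can only serialize top-level functions by qualified
# name, so lambdas or closures would fail when mapped across worker processes.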
@dataclass
class snake_case :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = 2
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2}
SCREAMING_SNAKE_CASE_ = {
'''a''': np.eye(2 ).astype(SCREAMING_SNAKE_CASE_ ),
'''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ),
'''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ),
}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): # can't pickle a local lambda
map_nested(lambda x : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 3, '''b''': 4}
SCREAMING_SNAKE_CASE_ = {'''a''': 5, '''b''': 6}
SCREAMING_SNAKE_CASE_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
class snake_case :
UpperCAmelCase__ = '''bar'''
SCREAMING_SNAKE_CASE_ = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(SCREAMING_SNAKE_CASE_ , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowerCamelCase ( __a, __a, __a ):
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(__a )}
SCREAMING_SNAKE_CASE_ = map_nested(lambda x : x + 10, __a, num_proc=__a, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
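# The cases above encode the dispatch rule: map_nested only creates a Pool when
# the iterable holds at least parallel_min_length (16) items and num_proc > 1,
# and it caps the worker count at the iterable length, e.g. num_proc=17 over a
# 16-item iterable still launches 16 workers.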
class snake_case ( __lowercase ):
@require_tf
def _lowercase (self ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
SCREAMING_SNAKE_CASE_ = layers.Dense(2 )
def gen_random_output():
SCREAMING_SNAKE_CASE_ = tf.random.uniform((1, 3) )
return model(SCREAMING_SNAKE_CASE_ ).numpy()
with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 , set_tensorflow=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def _lowercase (self ):
"""simple docstring"""
import torch
def gen_random_output():
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(3 , 2 )
SCREAMING_SNAKE_CASE_ = torch.rand(1 , 3 )
return model(SCREAMING_SNAKE_CASE_ ).detach().numpy()
with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 , set_pytorch=SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def _lowercase (self ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
with temp_seed(42 ):
SCREAMING_SNAKE_CASE_ = gen_random_output()
SCREAMING_SNAKE_CASE_ = gen_random_output()
np.testing.assert_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
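# Shared pattern of the three RNG tests above: two draws taken under
# temp_seed(42) must be bit-identical, while a third draw outside the context
# must differ, proving the global RNG state is restored rather than left pinned
# when the context manager exits.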
@pytest.mark.parametrize('''input_data''', [{}] )
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = NestedDataStructure(__a ).flatten()
assert output == expected_output
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = A(x=1, y='''foobar''' )
SCREAMING_SNAKE_CASE_ = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(__a ) == expected_output
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(__a ) == expected_output
with pytest.raises(__a ):
asdict([1, A(x=10, y='''foo''' )] )
def _lowerCamelCase ( __a ):
return text.split()
def _lowerCamelCase ( __a ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowerCamelCase ( ):
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__a ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = list(iflatmap_unordered(__a, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(__a ) == 20
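# Unlike Pool.map, iflatmap_unordered flattens each worker's generator into a
# single stream and surfaces items as soon as they are yielded, in whatever
# order workers produce them; the timing loop below asserts exactly that
# property for a generator that sleeps between its two yields.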
# check that we get items as fast as possible
with Pool(2 ) as pool:
SCREAMING_SNAKE_CASE_ = []
for yield_time, content in iflatmap_unordered(
__a, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(__a )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(__a ) == 4 | 628 | 1 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def _lowerCamelCase ( __a, __a=False ):
SCREAMING_SNAKE_CASE_ = OmegaConf.load(__a )
if display:
print(yaml.dump(OmegaConf.to_container(__a ) ) )
return config
def _lowerCamelCase ( __a, __a=None, __a=None ):
if conf_path is None:
SCREAMING_SNAKE_CASE_ = '''./model_checkpoints/vqgan_only.yaml'''
SCREAMING_SNAKE_CASE_ = load_config(__a, display=__a )
SCREAMING_SNAKE_CASE_ = VQModel(**config.model.params )
if ckpt_path is None:
SCREAMING_SNAKE_CASE_ = '''./model_checkpoints/vqgan_only.pt'''
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location=__a )
if ".ckpt" in ckpt_path:
SCREAMING_SNAKE_CASE_ = sd['''state_dict''']
model.load_state_dict(__a, strict=__a )
model.to(__a )
del sd
return model
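# Minimal usage sketch (load_vqgan/reconstruct_with_vqgan are the conventional
# upstream names of the loader above and the helper below; the device and file
# paths are hypothetical):
# >>> model = load_vqgan(torch.device('cuda'), conf_path='vqgan.yaml', ckpt_path='vqgan.ckpt')
# >>> xrec = reconstruct_with_vqgan(preprocessed_image, model)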
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = model.encode(__a )
print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
SCREAMING_SNAKE_CASE_ = model.decode(__a )
return xrec
def _lowerCamelCase ( __a, __a=False ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = string.rsplit('''.''', 1 )
if reload:
SCREAMING_SNAKE_CASE_ = importlib.import_module(__a )
importlib.reload(__a )
return getattr(importlib.import_module(__a, package=__a ), cls )
def _lowerCamelCase ( __a ):
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''', {} ) )
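# instantiate_from_config expects a mapping shaped like
#   {'target': 'taming.models.vqgan.VQModel', 'params': {...}}
# and resolves the dotted path with get_obj_from_str before calling the class
# with the (possibly empty) params dict.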
def _lowerCamelCase ( __a, __a, __a=True, __a=True ):
SCREAMING_SNAKE_CASE_ = instantiate_from_config(__a )
if sd is not None:
model.load_state_dict(__a )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def _lowerCamelCase ( __a, __a, __a, __a ):
# load the specified checkpoint
if ckpt:
SCREAMING_SNAKE_CASE_ = torch.load(__a, map_location='''cpu''' )
SCREAMING_SNAKE_CASE_ = pl_sd['''global_step''']
print(F'loaded model from global step {global_step}.' )
else:
SCREAMING_SNAKE_CASE_ = {'''state_dict''': None}
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = load_model_from_config(config.model, pl_sd['''state_dict'''], gpu=__a, eval_mode=__a )['''model''']
return model, global_step | 628 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 | 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def _lowerCamelCase ( __a, __a = 16, __a = "bert-base-cased" ):
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(__a )
SCREAMING_SNAKE_CASE_ = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=__a, max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__a, batched=__a, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=__a )
# We also rename the 'label' column to 'labels', which is the column name the models of the
# transformers library expect for labels
SCREAMING_SNAKE_CASE_ = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(__a, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(
tokenized_datasets['''train'''], shuffle=__a, collate_fn=__a, batch_size=__a )
SCREAMING_SNAKE_CASE_ = DataLoader(
tokenized_datasets['''validation'''], shuffle=__a, collate_fn=__a, batch_size=__a )
return train_dataloader, eval_dataloader
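# Design note: padding each batch to its own longest sequence keeps GPU work
# proportional to the real lengths, while TPU batches are padded to a fixed
# max_length=128 so that XLA does not recompile the graph for every new shape.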
def _lowerCamelCase ( __a, __a, __a, __a ):
model.eval()
SCREAMING_SNAKE_CASE_ = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once than multiple times
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
SCREAMING_SNAKE_CASE_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a, references=__a, )
SCREAMING_SNAKE_CASE_ = metric.compute()
return eval_metric["accuracy"]
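# The samples_seen bookkeeping above matters because distributed samplers pad
# the final batch with duplicated examples so every rank receives equal work;
# trimming to len(eval_dataloader.dataset) keeps those padding duplicates out
# of the reported metric.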
def _lowerCamelCase ( __a, __a ):
# Initialize accelerator
SCREAMING_SNAKE_CASE_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ = config['''lr''']
SCREAMING_SNAKE_CASE_ = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE_ = int(config['''seed'''] )
SCREAMING_SNAKE_CASE_ = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE_ = args.model_name_or_path
set_seed(__a )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = get_dataloaders(__a, __a, __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ = AutoModelForSequenceClassification.from_pretrained(__a, return_dict=__a )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE_ = optimizer_cls(params=model.parameters(), lr=__a )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE_ = get_linear_schedule_with_warmup(
optimizer=__a, num_warmup_steps=0, num_training_steps=__a, )
else:
SCREAMING_SNAKE_CASE_ = DummyScheduler(__a, total_num_steps=__a, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
__a, __a, __a, __a, __a )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE_ = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = evaluate.load('''glue''', '''mrpc''' )
SCREAMING_SNAKE_CASE_ = num_epochs
if args.partial_train_epoch is not None:
SCREAMING_SNAKE_CASE_ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE_ = args.resume_from_checkpoint.split('''epoch_''' )[1]
SCREAMING_SNAKE_CASE_ = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
SCREAMING_SNAKE_CASE_ = int(__a ) + 1
SCREAMING_SNAKE_CASE_ = evaluation_loop(__a, __a, __a, __a )
accelerator.print('''resumed checkpoint performance:''', __a )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizer\'s lr:''', optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir, F'state_{starting_epoch-1}.json' ), '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(__a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
SCREAMING_SNAKE_CASE_ = {}
for epoch in range(__a, __a ):
model.train()
for step, batch in enumerate(__a ):
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.loss
SCREAMING_SNAKE_CASE_ = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
SCREAMING_SNAKE_CASE_ = F'epoch_{epoch}'
SCREAMING_SNAKE_CASE_ = os.path.join(args.output_dir, __a )
accelerator.save_state(__a )
SCREAMING_SNAKE_CASE_ = evaluation_loop(__a, __a, __a, __a )
SCREAMING_SNAKE_CASE_ = accuracy
SCREAMING_SNAKE_CASE_ = lr_scheduler.get_lr()[0]
SCREAMING_SNAKE_CASE_ = optimizer.param_groups[0]['''lr''']
SCREAMING_SNAKE_CASE_ = epoch
SCREAMING_SNAKE_CASE_ = overall_step
accelerator.print(F'epoch {epoch}:', __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, F'state_{epoch}.json' ), '''w''' ) as f:
json.dump(__a, __a )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing and resumption.''' )
parser.add_argument(
'''--model_name_or_path''', type=__a, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=__a, )
parser.add_argument(
'''--output_dir''', type=__a, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--resume_from_checkpoint''', type=__a, default=__a, help='''If the training should continue from a checkpoint folder.''', )
parser.add_argument(
'''--partial_train_epoch''', type=__a, default=__a, help='''If passed, the training will stop after this number of epochs.''', )
parser.add_argument(
'''--num_epochs''', type=__a, default=2, help='''Number of train epochs.''', )
SCREAMING_SNAKE_CASE_ = parser.parse_args()
SCREAMING_SNAKE_CASE_ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a, __a )
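# Hedged launch sketch using only the flags defined above (the accelerate/
# DeepSpeed config file name is hypothetical):
# accelerate launch --config_file deepspeed_config.yaml this_script.py \
#     --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ./checkpoints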
if __name__ == "__main__":
main() | 628 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE_ = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE_ = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE_ = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE_ = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE_ = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE_ = 352
# set label information
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE_ = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = FocalNetConfig(
embed_dim=__a, depths=__a, focal_levels=__a, focal_windows=__a, use_conv_embed=__a, idalabel=__a, labelaid=__a, use_post_layernorm=__a, use_layerscale=__a, )
return config
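# Example resolution of the branches above: 'focalnet-large-lrf-fl4' maps to
# embed_dim=192, depths=[2, 2, 18, 2], focal_levels=[4, 4, 4, 4],
# focal_windows=[3, 3, 3, 3], convolutional patch embedding enabled, and the
# ImageNet-22k label set.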
def _lowerCamelCase ( __a ):
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE_ = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''encoder.layers''', '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''downsample.proj''', '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.f''', '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.h''', '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.proj''', '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE_ = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE_ = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head''', '''classifier''' )
else:
SCREAMING_SNAKE_CASE_ = '''focalnet.''' + name
return name
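# Worked example of the renaming above: 'layers.0.blocks.1.modulation.f.weight'
# becomes 'focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight'.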
def _lowerCamelCase ( __a, __a, __a=False ):
# fmt: off
SCREAMING_SNAKE_CASE_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''', __a )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__a, map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = get_focalnet_config(__a )
SCREAMING_SNAKE_CASE_ = FocalNetForImageClassification(__a )
model.eval()
# load state dict
model.load_state_dict(__a )
# verify conversion
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__a, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=__a, crop_size=224, do_normalize=__a, image_mean=__a, image_std=__a, )
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
SCREAMING_SNAKE_CASE_ = processor(images=__a, return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
SCREAMING_SNAKE_CASE_ = image_transforms(__a ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values, __a, atol=1E-4 )
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''', outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyImgaImgPipeline
UpperCAmelCase__ = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCAmelCase__ = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
SCREAMING_SNAKE_CASE_ = MultilingualCLIP(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = text_encoder.eval()
return text_encoder
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE_ = DDIMScheduler(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(SCREAMING_SNAKE_CASE_ )
# create init_image
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_ = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE_ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
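# Note on the inputs above: with strength=0.2 and num_inference_steps=10, the
# img2img schedule only runs roughly the last 2 denoising steps over the encoded
# init image, which keeps this smoke test fast while still exercising the
# timestep-skipping path.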
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE_ = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE_ = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = pipeline(
SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 628 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = RoCBertTokenizer
UpperCAmelCase__ = None
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = filter_non_english
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = {}
for i, value in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_ ) , [5, 6, 2, 5, 7, 8] )
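# RoCBert carries three parallel vocabularies (token ids, glyph/shape ids and
# pronunciation ids); setUp maps every token to the same index in all three
# files, which is why the three convert_* calls above can be checked against
# one and the same expected id list.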
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoCBertBasicTokenizer(do_lower_case=SCREAMING_SNAKE_CASE_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
SCREAMING_SNAKE_CASE_ = {}
for i, token in enumerate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = RoCBertWordpieceTokenizer(vocab=SCREAMING_SNAKE_CASE_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _lowercase (self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _lowercase (self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _lowercase (self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def _lowercase (self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(
SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = tokenizer_r.do_lower_case if hasattr(SCREAMING_SNAKE_CASE_ , '''do_lower_case''' ) else False
SCREAMING_SNAKE_CASE_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''的''', '''人''', '''有''']
SCREAMING_SNAKE_CASE_ = ''''''.join(SCREAMING_SNAKE_CASE_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(SCREAMING_SNAKE_CASE_ )
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''你好''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''你是谁''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ = '''你好,你是谁'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_shape_ids(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_pronunciation_ids(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.prepare_for_model(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    batch_params = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32
    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_0(self):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 100
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
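    # Added note (not from the original test): out_channels equals 2 * in_channels
    # because this UNet is configured with learned variance, so its output is split
    # channel-wise into a predicted noise (mean) half and a predicted variance half.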
    @property
    def dummy_movq_kwargs(self):
        """simple docstring"""
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule='''linear''', beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type='''epsilon''', thresholding=False, )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_controlnet(self):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''')
        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''')
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = '''A robot, 4k photo'''
        generator = torch.Generator(device='''cuda''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        generator = torch.Generator(device='''cuda''').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image) | 628 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''', '''--pretrained_model_name_or_path''', type=str, default=None, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models.''', )
    parser.add_argument(
        '''-c''', '''--caption''', type=str, default='''robotic cat with wings''', help='''Text used to generate images.''', )
    parser.add_argument(
        '''-n''', '''--images_num''', type=int, default=4, help='''How many images to generate.''', )
    parser.add_argument(
        '''-s''', '''--seed''', type=int, default=42, help='''Seed for random process.''', )
    parser.add_argument(
        '''-ci''', '''--cuda_id''', type=int, default=0, help='''cuda_id.''', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
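# Hypothetical layout check (my addition, not part of the original script): images
# are pasted row-major, so index i lands at box (i % cols * w, i // cols * h).
def _image_grid_demo(tile=64):
    tiles = [Image.new('''RGB''', (tile, tile), color=c) for c in ('red', 'green', 'blue', 'white')]
    grid = image_grid(tiles, rows=2, cols=2)
    assert grid.size == (2 * tile, 2 * tile)  # 2x2 grid of 64px tiles -> 128x128
    return grid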
def _lowerCamelCase ( __a, __a="robotic cat with wings", __a=7.5, __a=50, __a=1, __a=42, ):
SCREAMING_SNAKE_CASE_ = torch.Generator(pipeline.device ).manual_seed(__a )
SCREAMING_SNAKE_CASE_ = pipeline(
__a, guidance_scale=__a, num_inference_steps=__a, generator=__a, num_images_per_prompt=__a, ).images
SCREAMING_SNAKE_CASE_ = int(math.sqrt(__a ) )
SCREAMING_SNAKE_CASE_ = image_grid(__a, rows=_rows, cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# disable the safety checker so generated images are returned unchanged
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1))) | 628 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [R'''pooler''', R'''logit_scale''']
    _keys_to_ignore_on_load_missing = [R'''position_ids''', R'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        """simple docstring"""
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
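    # Added note: when `has_pre_transformation` is set on the config, the projection is
    # taken from the penultimate hidden state (hidden_states[-2]) after an extra
    # LayerNorm, instead of from the final layer's output (see `forward` below).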
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output2 = outputs['''hidden_states'''][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) | 628 | 1 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # compute each kernel value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
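# Minimal sanity check (added example, not in the original file): at the kernel
# center px = py = 0, so the Gaussian envelope is exp(0) = 1 and the value reduces
# to cos(psi); with psi = 0 the center coefficient is therefore exactly 1.0.
def _gabor_center_demo():
    kernel = gabor_filter_kernel(ksize=9, sigma=8, theta=0, lambd=10, gamma=0, psi=0)
    assert kernel[4, 4] == 1.0  # cos(0) at the center of a 9x9 kernel
    return kernel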
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('../image_data/lena.jpg')
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow('Original', gray)
    imshow('Gabor filter with an 11x11 mask and 6 directions', out)
    waitKey(0) | 628 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase ( __a, __a=0.9_9_9, __a="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ = []
for i in range(__a ):
SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ), __a ) )
return torch.tensor(__a, dtype=torch.floataa )
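# Hedged sketch (added illustration, not part of the scheduler API): each beta_t above
# is 1 - alpha_bar(t2) / alpha_bar(t1), so the running product of (1 - beta_t) should
# approximately retrace alpha_bar itself, up to the max_beta clipping near t = 1.
def _alpha_bar_roundtrip_demo(num_steps=1000):
    betas = betas_for_alpha_bar(num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    expected_final = math.cos((1.0 + 0.008) / 1.008 * math.pi / 2) ** 2  # alpha_bar(1)
    return alphas_cumprod[-1].item(), expected_final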
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", use_karras_sigmas=False, clip_sample=False, clip_sample_range=1.0, timestep_spacing="linspace", steps_offset=0, ):
        """simple docstring"""
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='''cosine''')
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='''exp''')
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        """simple docstring"""
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep, ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep)
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
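    # Added note: a sample at noise level sigma has variance roughly sigma**2 + 1
    # (unit-variance data plus sigma-scaled noise), so dividing by (sigma**2 + 1) ** 0.5
    # above rescales the model input back to approximately unit variance.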
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None, ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.')
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith('''mps'''):
            # mps does not support float64
            timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            timesteps = timesteps.to(device=device)
        self.timesteps = timesteps
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        """simple docstring"""
        log_sigma = np.log(sigma)
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        """simple docstring"""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
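    # Added illustration (not part of the scheduler API): the Karras et al. (2022)
    # rho-schedule above interpolates in sigma ** (1 / rho) space, e.g.
    #
    #   ramp = np.linspace(0, 1, 10)
    #   sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** 7.0
    #
    # which yields a decreasing sigma grid whose steps cluster near sigma_min,
    # where most of the fine detail is resolved during sampling.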
@property
    def state_in_first_order(self):
"""simple docstring"""
return self.dt is None
    def step(self, model_output, timestep, sample, return_dict=True, ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`')
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note: this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
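    # Added sketch of the two-stage update implemented in `step` above (illustration
    # only): plain Heun's method for x' = f(t, x) over one step dt is
    #
    #   d1 = f(t, x)                       # first call: slope, stored as prev_derivative
    #   x_euler = x + d1 * dt              # first call: provisional Euler step
    #   d2 = f(t + dt, x_euler)            # second call: slope at the predicted point
    #   x_next = x + 0.5 * (d1 + d2) * dt  # second call: trapezoidal correction
    #
    # which is why `step` alternates between "first order" and "second order" states.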
    def add_noise(self, original_samples, noise, timesteps, ):
        """simple docstring"""
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps | 628 | 1 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule(temperature, molar_mass):
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''')
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''')
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
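# Worked example (added; assumes the SI units the error message above implies):
# for nitrogen (N2) at 300 K with M = 0.028 kg/mol,
# v_rms = sqrt(3 * 8.3144598 * 300 / 0.028) ~= 517 m/s.
def _nitrogen_vrms_demo():
    return rms_speed_of_molecule(300, 0.028)  # ~= 516.96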
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 0.028  # kg/mol for nitrogen (N2), i.e. 28 g/mol in SI units
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''') | 628 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, '''dpr_tokenizer''')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        # BART tok
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        bart_tokenizer_path = os.path.join(self.tmpdirname, '''bart_tokenizer''')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))
    def get_dpr_tokenizer(self):
        """simple docstring"""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))
    def get_dpr_ctx_encoder_tokenizer(self):
        """simple docstring"""
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''dpr_tokenizer'''))
    def get_bart_tokenizer(self):
        """simple docstring"""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, '''bart_tokenizer'''))
    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        """simple docstring"""
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        """simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''') as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk):
        """simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''custom''', )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, '''dataset''')
            config.index_path = os.path.join(self.tmpdirname, '''index.faiss''')
            dataset.get_index('''embeddings''').save(os.path.join(self.tmpdirname, '''index.faiss'''))
            dataset.drop_index('''embeddings''')
            dataset.save_to_disk(os.path.join(self.tmpdirname, '''dataset'''))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        """simple docstring"""
        dataset = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index('''embeddings''', string_factory='''Flat''', metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''')
        dataset.save_faiss_index('''embeddings''', index_file_name + '''.index.dpr''')
        pickle.dump(dataset['''id'''], open(index_file_name + '''.index_meta.dpr''', '''wb'''))
        passages_file_name = os.path.join(self.tmpdirname, '''psgs_w100.tsv.pkl''')
        passages = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, '''wb'''))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name='''legacy''', index_path=self.tmpdirname, )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        """simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), n_docs)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
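    # Added note: with query embeddings of all +1 and all -1, and document embeddings
    # of all 1 (doc "0") and all 2 (doc "1"), the inner products are +d and +2d for
    # the first query and -d and -2d for the second (d = retrieval_vector_size), which
    # is why doc "1" wins the first query and doc "0" the second in the asserts above.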
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        """simple docstring"""
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''') as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        """simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), n_docs)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        """simple docstring"""
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        """simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''embeddings''', '''id''', '''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''id''']), n_docs)
        self.assertEqual(doc_dicts[0]['''id'''][0], '''1''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0], '''0''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        """simple docstring"""
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        """simple docstring"""
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['''text''', '''title'''])
        self.assertEqual(len(doc_dicts[0]['''text''']), n_docs)
        self.assertEqual(doc_dicts[0]['''text'''][0], '''bar''')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0], '''foo''')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_index_retriever_save_and_from_pretrained(self):
        """simple docstring"""
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        """simple docstring"""
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors='''pt''', )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        """simple docstring"""
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6)  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''')), True)  # check for doc token related keys in dictionary. | 628 | 1 |
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
    def prepare_feat_extract_dict(self):
        """simple docstring"""
        return {}
def get_html_strings():
    html_string_a = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_b = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_a, html_string_b]
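# Rough sketch (added illustration) of what MarkupLMFeatureExtractor does internally:
# parse the HTML and collect the text of leaf nodes; the real extractor additionally
# builds an xpath (with sibling indices) for every node it keeps.
def _naive_text_nodes_demo(html_string):
    from bs4 import BeautifulSoup  # the bs4 dependency gated by `require_bsa` below
    soup = BeautifulSoup(html_string, 'html.parser')
    return list(soup.stripped_strings)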
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp(self):
        """simple docstring"""
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
    @property
    def feat_extract_dict(self):
        """simple docstring"""
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths) | 628 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
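# Note on complexity: _build_tree touches every element once (O(n)), while
# _update_tree and _query_range each follow a single root-to-leaf path, so
# update and query_range both run in O(log n) for n stored values.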
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
        print(arr.query_range(3, 4)) # 7 with operator.add (4 with max, 3 with min)
        print(arr.query_range(2, 2)) # 5 with all three functions
        print(arr.query_range(1, 3)) # 13 with operator.add (5 with max, 3 with min)
print() | 628 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''donut-swin'''
UpperCAmelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=96 , SCREAMING_SNAKE_CASE_=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=4.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-5 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) ) | 628 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = 1
for i in range(1, __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
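# Illustrative sketch: the loop above realises the Catalan recurrence
# C(k) = C(k-1) * (4k - 2) // (k + 1), so the function returns the
# (number - 1)-th Catalan number:
# >>> [_lowerCamelCase(n) for n in range(1, 6)]
# [1, 1, 2, 5, 14]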
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 1 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
lowerCAmelCase__ = 'src/transformers'
lowerCAmelCase__ = 'docs/source/en'
lowerCAmelCase__ = '.'
def _lowerCamelCase ( __a, __a, __a ):
with open(__a, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE_ = f.readlines()
# Find the start prompt.
SCREAMING_SNAKE_CASE_ = 0
while not lines[start_index].startswith(__a ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE_ = start_index
while not lines[end_index].startswith(__a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCAmelCase__ = 'Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
lowerCAmelCase__ = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
lowerCAmelCase__ = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCAmelCase__ = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''', __a )
return [m.group(0 ) for m in matches]
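# For illustration: the regex splits CamelCase identifiers, e.g.
# camel_case_split("TFBertModel") -> ["TF", "Bert", "Model"].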
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = 2 if text == '''✅''' or text == '''❌''' else len(__a )
SCREAMING_SNAKE_CASE_ = (width - text_length) // 2
SCREAMING_SNAKE_CASE_ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
SCREAMING_SNAKE_CASE_ = {name: config.replace('''Config''', '''''' ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging whether each model prefix has a slow/fast tokenizer and a PT/TF/Flax backend.
SCREAMING_SNAKE_CASE_ = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ = collections.defaultdict(__a )
SCREAMING_SNAKE_CASE_ = collections.defaultdict(__a )
    # Let's look through all transformers objects (once).
for attr_name in dir(__a ):
SCREAMING_SNAKE_CASE_ = None
if attr_name.endswith('''Tokenizer''' ):
SCREAMING_SNAKE_CASE_ = slow_tokenizers
SCREAMING_SNAKE_CASE_ = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
SCREAMING_SNAKE_CASE_ = fast_tokenizers
SCREAMING_SNAKE_CASE_ = attr_name[:-13]
elif _re_tf_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ = tf_models
SCREAMING_SNAKE_CASE_ = _re_tf_models.match(__a ).groups()[0]
elif _re_flax_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ = flax_models
SCREAMING_SNAKE_CASE_ = _re_flax_models.match(__a ).groups()[0]
elif _re_pt_models.match(__a ) is not None:
SCREAMING_SNAKE_CASE_ = pt_models
SCREAMING_SNAKE_CASE_ = _re_pt_models.match(__a ).groups()[0]
if lookup_dict is not None:
while len(__a ) > 0:
if attr_name in model_name_to_prefix.values():
SCREAMING_SNAKE_CASE_ = True
break
# Try again after removing the last word in the name
SCREAMING_SNAKE_CASE_ = ''''''.join(camel_case_split(__a )[:-1] )
# Let's build that table!
SCREAMING_SNAKE_CASE_ = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
SCREAMING_SNAKE_CASE_ = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
SCREAMING_SNAKE_CASE_ = [len(__a ) + 2 for c in columns]
SCREAMING_SNAKE_CASE_ = max([len(__a ) for name in model_names] ) + 2
# Build the table per se
SCREAMING_SNAKE_CASE_ = '''|''' + '''|'''.join([_center_text(__a, __a ) for c, w in zip(__a, __a )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
SCREAMING_SNAKE_CASE_ = {True: '''✅''', False: '''❌'''}
for name in model_names:
SCREAMING_SNAKE_CASE_ = model_name_to_prefix[name]
SCREAMING_SNAKE_CASE_ = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__a, __a ) for l, w in zip(__a, __a )] ) + "|\n"
return table
def _lowerCamelCase ( __a=False ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = _find_text_in_file(
filename=os.path.join(__a, '''index.md''' ), start_prompt='''<!--This table is updated automatically from the auto modules''', end_prompt='''<!-- End table-->''', )
SCREAMING_SNAKE_CASE_ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__a, '''index.md''' ), '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ = parser.parse_args()
check_model_table(args.fix_and_overwrite) | 628 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__a ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE_ = i + 1
else:
SCREAMING_SNAKE_CASE_ = j - 1
return []
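# Note: the two-pointer scan above assumes `nums` is sorted in ascending
# order; e.g. for [2, 7, 11, 15] and target 9 it returns the indices [0, 1].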
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''') | 628 | 1 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class snake_case ( __lowercase ):
def __init__(self , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = eval_examples
SCREAMING_SNAKE_CASE_ = post_process_function
def _lowercase (self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = "eval" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ = self.get_eval_dataloader(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
SCREAMING_SNAKE_CASE_ = self.compute_metrics
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE_ = time.time()
try:
SCREAMING_SNAKE_CASE_ = eval_loop(
SCREAMING_SNAKE_CASE_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , metric_key_prefix=SCREAMING_SNAKE_CASE_ , )
finally:
SCREAMING_SNAKE_CASE_ = compute_metrics
SCREAMING_SNAKE_CASE_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
SCREAMING_SNAKE_CASE_ = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , output.predictions )
SCREAMING_SNAKE_CASE_ = self.compute_metrics(SCREAMING_SNAKE_CASE_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ = metrics.pop(SCREAMING_SNAKE_CASE_ )
metrics.update(output.metrics )
else:
SCREAMING_SNAKE_CASE_ = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(SCREAMING_SNAKE_CASE_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , SCREAMING_SNAKE_CASE_ )
return metrics
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = "test" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_test_dataloader(SCREAMING_SNAKE_CASE_ )
        # Temporarily disable metric computation; we will do it in the loop here.
SCREAMING_SNAKE_CASE_ = self.compute_metrics
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
SCREAMING_SNAKE_CASE_ = time.time()
try:
SCREAMING_SNAKE_CASE_ = eval_loop(
SCREAMING_SNAKE_CASE_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=SCREAMING_SNAKE_CASE_ , metric_key_prefix=SCREAMING_SNAKE_CASE_ , )
finally:
SCREAMING_SNAKE_CASE_ = compute_metrics
SCREAMING_SNAKE_CASE_ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ = self.post_process_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , output.predictions , '''predict''' )
SCREAMING_SNAKE_CASE_ = self.compute_metrics(SCREAMING_SNAKE_CASE_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ = metrics.pop(SCREAMING_SNAKE_CASE_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=SCREAMING_SNAKE_CASE_ ) | 628 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a=2, __a=3, __a=16, __a = 10, __a = 2 ):
def get_dataset(__a ):
SCREAMING_SNAKE_CASE_ = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__a, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=None ):
SCREAMING_SNAKE_CASE_ = []
for epoch in range(__a ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = batch
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(__a, __a )
accelerator.backward(__a )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
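# Note: the collected random draws act as a fingerprint of the RNG state,
# letting the checkpointing tests below verify that training resumes
# identically after a state restore.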
class snake_case ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
            # Save 11 states; with total_limit=2 only the last two survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 628 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowerCamelCase ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__a ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def _lowerCamelCase ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def _lowerCamelCase ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__a ):
http_head('''https://huggingface.co''' ) | 628 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
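    # Note: the method above (invoked as full_loop in the tests below) runs
    # the complete reverse-diffusion sampling loop and returns the final
    # sample tensor, which the regression tests reduce to scalar sums/means.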
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCAmelCase__ = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = TransfoXLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
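        # Note: the @-@, @,@ and @.@ tokens are TransfoXL's wikitext-style
        # placeholders for intra-word hyphens and digit separators;
        # convert_tokens_to_string collapses them back into "-", "," and ".",
        # which is what the round-trip assertion above checks.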
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' ) | 628 | 1 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Any
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = value
        SCREAMING_SNAKE_CASE_ = None # Added to make deleting a node easier
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def __repr__(self ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'{self.value}': (self.left, self.right)} , indent=1 )
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = root
def __str__(self ):
"""simple docstring"""
return str(self.root )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if new_children is not None: # reset its kids
SCREAMING_SNAKE_CASE_ = node.parent
if node.parent is not None: # reset its parent
if self.is_right(SCREAMING_SNAKE_CASE_ ): # If it is the right children
SCREAMING_SNAKE_CASE_ = new_children
else:
SCREAMING_SNAKE_CASE_ = new_children
else:
SCREAMING_SNAKE_CASE_ = new_children
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase (self ):
"""simple docstring"""
return self.root is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Node(SCREAMING_SNAKE_CASE_ ) # create a new Node
if self.empty(): # if Tree is empty
SCREAMING_SNAKE_CASE_ = new_node # set its root
else: # Tree is not empty
SCREAMING_SNAKE_CASE_ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
SCREAMING_SNAKE_CASE_ = new_node # We insert the new node in a leaf
break
else:
SCREAMING_SNAKE_CASE_ = parent_node.left
else:
if parent_node.right is None:
SCREAMING_SNAKE_CASE_ = new_node
break
else:
SCREAMING_SNAKE_CASE_ = parent_node.right
SCREAMING_SNAKE_CASE_ = parent_node
def _lowercase (self , *SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
for value in values:
self.__insert(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if self.empty():
            raise IndexError('''Warning: Tree is empty! Please insert values before searching.''' )
else:
SCREAMING_SNAKE_CASE_ = self.root
            # short-circuit the check so a None node never triggers an AttributeError
            while node is not None and node.value != value:
SCREAMING_SNAKE_CASE_ = node.left if value < node.value else node.right
return node
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
if node is None:
if self.root is None:
return None
SCREAMING_SNAKE_CASE_ = self.root
if not self.empty():
while node.right is not None:
SCREAMING_SNAKE_CASE_ = node.right
return node
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
if node is None:
SCREAMING_SNAKE_CASE_ = self.root
if self.root is None:
return None
if not self.empty():
SCREAMING_SNAKE_CASE_ = self.root
while node.left is not None:
SCREAMING_SNAKE_CASE_ = node.left
return node
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.search(SCREAMING_SNAKE_CASE_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(SCREAMING_SNAKE_CASE_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(SCREAMING_SNAKE_CASE_ , node.left )
else:
SCREAMING_SNAKE_CASE_ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
SCREAMING_SNAKE_CASE_ = (
tmp_node.value # type: ignore
                ) # Assigns the value to the node being deleted while keeping the tree structure
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase (self , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node:
self.inorder(SCREAMING_SNAKE_CASE_ , node.left )
arr.append(node.value )
self.inorder(SCREAMING_SNAKE_CASE_ , node.right )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
self.inorder(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # append all values to list using inorder traversal
return arr[k - 1]
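# Note: an in-order traversal of a binary search tree yields its values in
# sorted order, so arr[k - 1] in the method above is the k-th smallest value
# stored in the tree.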
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = []
if curr_node is not None:
SCREAMING_SNAKE_CASE_ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
SCREAMING_SNAKE_CASE_ = BinarySearchTree()
for i in testlist:
t.insert(__a )
# Prints all the elements of the list in order traversal
print(__a )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''', t.get_max().value ) # type: ignore
print('''Min Value: ''', t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__a )
print(__a )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 628 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__a, max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
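# Illustrative sketch: for max_perimeter = 12 the only counted triple is
# (3, 4, 5), so the function returns Counter({12: 1}).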
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
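# Note: with the default bound of 1_000 this is Project Euler problem 39;
# the perimeter with the most right-triangle solutions is 840.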
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 628 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def _lowerCamelCase ( __a, __a, __a, __a ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
SCREAMING_SNAKE_CASE_ = TOKENIZER_CLASSES
else:
SCREAMING_SNAKE_CASE_ = {tokenizer_name: getattr(__a, tokenizer_name + '''Fast''' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
SCREAMING_SNAKE_CASE_ = TOKENIZER_CLASSES[tokenizer_name]
SCREAMING_SNAKE_CASE_ = True
if checkpoint_name is None:
SCREAMING_SNAKE_CASE_ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
SCREAMING_SNAKE_CASE_ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
SCREAMING_SNAKE_CASE_ = tokenizer_class.from_pretrained(__a, force_download=__a )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = checkpoint.split('''/''' )
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
elif add_prefix:
SCREAMING_SNAKE_CASE_ = checkpoint
SCREAMING_SNAKE_CASE_ = dump_path
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
SCREAMING_SNAKE_CASE_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
SCREAMING_SNAKE_CASE_ = file_path.split(__a )[-1][0]
if next_char == "/":
SCREAMING_SNAKE_CASE_ = os.path.join(__a, __a )
SCREAMING_SNAKE_CASE_ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
SCREAMING_SNAKE_CASE_ = tokenizer.save_pretrained(
__a, legacy_format=__a, filename_prefix=__a )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(__a )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
lowerCAmelCase__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) | 628 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
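# Note: torch.compile wraps a module in OptimizedModule and keeps the original
# module under `_orig_mod`, which the unwrapping below relies on.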
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
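# Illustrative sketch: merge_dicts recurses into nested dicts and mutates
# `destination` in place, e.g.
# merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) returns {"a": {"c": 2, "b": 1}}.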
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 628 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester :
    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs (self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config (self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model (self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFRegNetModel(config=config )
        result = model(pixel_values , training=False )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification (self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common (self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class snake_case ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCAmelCase__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase__ = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def setUp (self ):
        """simple docstring"""
        self.model_tester = TFRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
def _lowercase (self ):
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _lowercase (self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def _lowercase (self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase (self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_img ( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
    def default_image_processor (self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase (self ):
"""simple docstring"""
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def setUp (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    def get_tokenizer (self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts (self , tokenizer ):
        """simple docstring"""
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
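# Hedged usage sketch (assumption: this is the standard transformers lazy-import
# pattern): submodules are imported only on first attribute access, so pulling a
# config class stays cheap while model classes trigger the torch-backed import.
#
#   from transformers.models.speecht5 import SpeechT5Config   # config module only
#   from transformers.models.speecht5 import SpeechT5Model    # loads modeling code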
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa ):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCAmelCase__ = MvpTokenizer
UpperCAmelCase__ = MvpTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = filter_roberta_detectors
    def setUp (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    def get_tokenizer (self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer (self , **kwargs ):
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts (self , tokenizer ):
        """simple docstring"""
        return "lower newer", "lower newer"
@cached_property
    def default_tokenizer (self ):
"""simple docstring"""
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
    def default_tokenizer_fast (self ):
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , max_length=len(SCREAMING_SNAKE_CASE_ ) , padding=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Test that special tokens are reset
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , SCREAMING_SNAKE_CASE_ )
self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE_ )
self.assertNotIn('''labels''' , SCREAMING_SNAKE_CASE_ )
self.assertNotIn('''decoder_attention_mask''' , SCREAMING_SNAKE_CASE_ )
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(text_target=SCREAMING_SNAKE_CASE_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def _lowercase (self ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.input_ids.shape , (2, 10_24) )
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''A long paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , text_target=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = inputs['''input_ids''']
SCREAMING_SNAKE_CASE_ = inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # The Rust tokenizer correctly handles the space before the mask, while the Python one doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
SCREAMING_SNAKE_CASE_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
                    SCREAMING_SNAKE_CASE_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image ( image ):
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''', FutureWarning, )
    if isinstance(image, torch.Tensor ):
        return image
    elif isinstance(image, PIL.Image.Image ):
        image = [image]
    if isinstance(image[0], PIL.Image.Image ):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image, axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0, 3, 1, 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0], torch.Tensor ):
        image = torch.cat(image, dim=0 )
    return image
def _preprocess_mask ( mask ):
    if isinstance(mask, torch.Tensor ):
        return mask
    elif isinstance(mask, PIL.Image.Image ):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image ):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        # binarize: values below 0.5 become 0, everything else becomes 1
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0], torch.Tensor ):
        mask = torch.cat(mask, dim=0 )
    return mask
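# Hedged usage sketch for the two helpers above (file names are illustrative):
# images come back as float tensors in [-1, 1] with shape (N, 3, H, W); masks
# are binarized to {0, 1}.
#
#   img_t = _preprocess_image(PIL.Image.open('''photo.png''' ).convert('''RGB''' ))
#   mask_t = _preprocess_mask(PIL.Image.open('''mask.png''' ))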
class snake_case ( DiffusionPipeline ):
    unet: UNet2DModel
    scheduler: RePaintScheduler
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
    def __call__(self , image , mask_image , num_inference_steps = 2_50 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ):
        """simple docstring"""
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator )}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '▁'
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
lowerCAmelCase__ = {
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class snake_case ( PreTrainedTokenizer ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = []
UpperCAmelCase__ = []
    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = len(self.sp_model )
SCREAMING_SNAKE_CASE_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(SCREAMING_SNAKE_CASE_ )
}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
SCREAMING_SNAKE_CASE_ = src_lang if src_lang is not None else '''eng_Latn'''
SCREAMING_SNAKE_CASE_ = self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _lowercase (self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowercase (self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def _lowercase (self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def _lowercase (self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _lowercase (self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _lowercase (self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
        return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=str )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
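    # Hedged worked example for the offset arithmetic above (ids follow the
    # fairseq/spm alignment table in __init__; illustrative, not asserted
    # anywhere in this file):
    #
    #   sp_model.PieceToId('''▁n''') == 4   ->  4 + fairseq_offset (1) == 5
    #   sp_model.PieceToId(oov_piece) == 0  ->  falls back to unk_token_id (3)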
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def _lowercase (self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace('''▁''' , ''' ''' ).strip()
        return out_string
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
    def prepare_seq2seq_batch (self , src_texts , src_lang = "eng_Latn" , tgt_texts = None , tgt_lang = "fra_Latn" , **kwargs , ):
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
def _lowercase (self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _lowercase (self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens (self , src_lang ):
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens (self , lang ):
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def simple_accuracy ( preds, labels ):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class DataTrainingArguments :
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
    data_dir: str = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''', training_args )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": simple_accuracy(preds, p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, '''eval_results.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file, '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''', key, value )
                    writer.write('''%s = %s\n''' % (key, value) )
        results.update(result )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
"""simple docstring"""
from math import factorial
def combinations ( n, k ):
    # If either condition is true, the function is being asked to
    # calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
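# Hedged note (standard-library equivalence, not part of the original file):
# on Python 3.8+ math.comb computes the same value without building three
# full factorials.
#
#   from math import comb
#   assert comb(52, 5) == combinations(52, 5) == 2_598_960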
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
    )
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner :
    def __init__(self , k , window_size ):
        """simple docstring"""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
    def detect (self , img_path ):
        """simple docstring"""
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Harris response: R = det(M) - k * trace(M)^2
                r = det - k * (trace**2)
                # threshold on the corner response; tune for your images
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_55 )
        return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 628 | 1 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( TestCase ):
    def setUp (self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        os.makedirs(dpr_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(dpr_tokenizer_path , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
        bart_tokenizer_path = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    def get_dpr_tokenizer (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset (self ):
"""simple docstring"""
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever (self ):
"""simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever
    def get_dummy_custom_hf_index_retriever (self , from_disk ):
"""simple docstring"""
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname , '''dataset''' )
            config.index_path = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , dataset ) , )
        return retriever
def _lowercase (self ):
"""simple docstring"""
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(len(out), 6)
        # check for doc token related keys in the dictionary
        self.assertEqual(all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True)
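# Minimal sketch of the retrieve() shape contract exercised by the tests
# above, reproduced on plain NumPy arrays; the sizes (2 questions, 1 retrieved
# doc, vector size 4) are assumed example values, not from the original file.
import numpy as np

n_questions, n_docs, vector_size = 2, 1, 4
retrieved_doc_embeds = np.ones((n_questions, n_docs, vector_size), dtype=np.float32)
doc_ids = np.array([[1], [0]])
assert retrieved_doc_embeds.shape == (n_questions, n_docs, vector_size)
assert doc_ids.tolist() == [[1], [0]]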
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75,
            generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75,
            generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75,
            generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
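# Minimal sketch of the reproducibility pattern relied on above (plain
# PyTorch, independent of diffusers): re-seeding the generator must reproduce
# the same random draws, which is what makes the save/reload comparison valid.
import torch

generator = torch.manual_seed(0)
first = torch.rand(2, 2, generator=generator)
generator = torch.manual_seed(0)
second = torch.rand(2, 2, generator=generator)
assert torch.equal(first, second)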
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
        decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
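# Minimal usage sketch (assumes the transformers library is installed): the
# defaults above reproduce the ViT-MAE base architecture, and mask_ratio sets
# the fraction of patches hidden from the encoder during pretraining.
from transformers import ViTMAEConfig

config = ViTMAEConfig(mask_ratio=0.5)
assert config.hidden_size == 768 and config.mask_ratio == 0.5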
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        sn1 = {}
        sn2 = []
        sn3 = 1
        sn4 = [1, 2]
        sn5 = {"a": 1, "b": 2}
        sn6 = {"a": [1, 2], "b": [3, 4]}
        sn7 = {"a": {"1": 1}, "b": 2}
        sn8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_sn1 = {}
        expected_map_nested_sn2 = []
        expected_map_nested_sn3 = 2
        expected_map_nested_sn4 = [2, 3]
        expected_map_nested_sn5 = {"a": 2, "b": 3}
        expected_map_nested_sn6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_sn7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_sn8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, sn1), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8), expected_map_nested_sn8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, sn1, num_proc=num_proc), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2, num_proc=num_proc), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3, num_proc=num_proc), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4, num_proc=num_proc), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5, num_proc=num_proc), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6, num_proc=num_proc), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7, num_proc=num_proc), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8, num_proc=num_proc), expected_map_nested_sn8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
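# Minimal sketch of the map_nested contract exercised above (assumes the
# `datasets` library): the function is applied to every leaf of a nested
# structure while the container shape is preserved.
from datasets.utils.py_utils import map_nested

assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}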
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
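# Minimal sketch of temp_seed (assumes the `datasets` library): inside the
# context the NumPy RNG is seeded, so two identically seeded blocks produce
# identical draws, as the tests above verify for NumPy, TF and PyTorch.
import numpy as np
from datasets.utils.py_utils import temp_seed

with temp_seed(42):
    first = np.random.rand(3)
with temp_seed(42):
    second = np.random.rand(3)
np.testing.assert_equal(first, second)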
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    # check with multiprocessing.Pool
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = "hf-internal-testing/tiny-random-bert"
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))
            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
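# Minimal usage sketch (assumes transformers and network access): cached_file
# downloads a Hub file once and then resolves to the same local snapshot path
# on every later call, which is the behavior test_cached_file checks above.
from transformers.utils import cached_file

first_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
second_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
assert first_path == second_path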
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24,
        num_mel_bins=24, padding_value=0.0, sampling_rate=16_000, return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np",
                return_attention_mask=True,
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True,
            return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
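# Minimal sketch of the per-feature normalization that
# _check_zero_mean_unit_variance verifies above (plain NumPy): subtract the
# mean and divide by the standard deviation along the time axis.
import numpy as np

features = np.random.rand(100, 24)
normalized = (features - features.mean(axis=0)) / np.sqrt(features.var(axis=0) + 1e-7)
assert np.all(np.abs(normalized.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normalized.var(axis=0) - 1) < 1e-3)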
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows,
        use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id,
        use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
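# Example of the key mapping implemented above, traced by hand:
#   rename_key("patch_embed.proj.weight")
#   -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   rename_key("head.weight") -> "classifier.weight"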
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True, size={"shortest_edge": 256}, resample=PILImageResampling.BILINEAR,
        do_center_crop=True, crop_size=224, do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 1 |
"""simple docstring"""
import cva
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = np.gradient(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
SCREAMING_SNAKE_CASE_ = 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
                # corner response threshold; can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 628 |
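
# For reference, OpenCV ships a vectorized Harris detector that computes the
# same kind of response map as the nested loops above; a rough equivalent
# (the image path and the 0.01 threshold factor are illustrative):
import cv2
import numpy as np

gray = cv2.imread("path_to_image", 0)
response = cv2.cornerHarris(np.float32(gray), blockSize=3, ksize=3, k=0.04)
corners = np.argwhere(response > 0.01 * response.max())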
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 628 | 1 |
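
# Minimal sketch: instantiate the configuration above with its defaults and
# build the companion transformers model with random weights.
from transformers import GLPNConfig, GLPNModel

config = GLPNConfig()
model = GLPNModel(config)
print(config.hidden_sizes, config.decoder_hidden_size)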
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
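
# A stripped-down sketch of what the _LazyModule indirection above buys: heavy
# submodules are only imported on first attribute access. This illustrates the
# idea and is not transformers' actual implementation.
import importlib
import types

class LazySubmoduleProxy(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(attr)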
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
            # Out channels are double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
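
# Sketch of how a depth "hint" like the one loaded in the slow test can be
# produced from scratch. The Intel/dpt-large checkpoint is an assumption; any
# transformers depth-estimation model yields the same dict layout.
import numpy as np
import torch
from transformers import pipeline
from diffusers.utils import load_image

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
img = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinskyv22/hint_image_cat.png"
)
depth = np.array(depth_estimator(img)["depth"])[:, :, None].repeat(3, axis=2)
hint = torch.from_numpy(depth).float().permute(2, 0, 1).unsqueeze(0) / 255.0  # (1, 3, H, W)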
"""simple docstring"""
def _lowerCamelCase ( __a = 50 ):
SCREAMING_SNAKE_CASE_ = [1] * (length + 1)
for row_length in range(3, length + 1 ):
for block_length in range(3, row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''') | 628 |
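
# Sanity check for the recurrence above: it matches Project Euler 114, which
# counts fills of a row with red blocks of length >= 3 separated by at least
# one black square; a row of length 7 has exactly 17 arrangements. The call
# uses solution(), the same de-obfuscated name the __main__ block relies on.
assert solution(7) == 17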
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 | 1 |
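
# By structure, these three classes correspond to diffusers'
# TransformationModelOutput, RobertaSeriesConfig and
# RobertaSeriesModelWithTransformation (an assumption; the dump reuses
# "snake_case" for all of them). Random-weight instantiation sketch under
# those names:
import torch

config = RobertaSeriesConfig(project_dim=768)
model = RobertaSeriesModelWithTransformation(config)
out = model(input_ids=torch.tensor([[0, 31414, 2]]), attention_mask=torch.ones(1, 3, dtype=torch.long))
print(out.projection_state.shape)  # (1, 3, 768) when has_pre_transformation is unset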
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = None
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
SCREAMING_SNAKE_CASE_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
SCREAMING_SNAKE_CASE_ = str(distributed_port + 1 )
SCREAMING_SNAKE_CASE_ = dist.new_group(ranks=SCREAMING_SNAKE_CASE_ , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _lowercase (self ):
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=torch.floataa ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.empty(SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
dist.scatter(SCREAMING_SNAKE_CASE_ , src=0 , scatter_list=SCREAMING_SNAKE_CASE_ , group=self.process_group )
return target_tensor
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
SCREAMING_SNAKE_CASE_ = next((addr for addr in addrs if addr.startswith('''e''' )) , SCREAMING_SNAKE_CASE_ )
return ifname
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not dist.is_initialized():
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
# distributed training
SCREAMING_SNAKE_CASE_ = dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE_ = None
if self._is_main():
SCREAMING_SNAKE_CASE_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE_ )]
dist.gather(torch.tensor(SCREAMING_SNAKE_CASE_ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE_ , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE_ = question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
if self._is_main():
assert len(SCREAMING_SNAKE_CASE_ ) == world_size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE_ ).numpy() , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE_ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ ) | 628 |
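
# The retrieve() round-trip above in miniature: rank 0 gathers the per-rank
# query tensors, does the work, and scatters results back over a gloo group.
# Sketch meant to be launched with torchrun --nproc_per_node=N:
import torch
import torch.distributed as dist

dist.init_process_group("gloo")
rank, world = dist.get_rank(), dist.get_world_size()
query = torch.full((2,), float(rank))
gathered = [torch.empty(2) for _ in range(world)] if rank == 0 else None
dist.gather(query, gather_list=gathered, dst=0)
results = [g + 100 for g in gathered] if rank == 0 else None  # stand-in for the retrieval work
out = torch.empty(2)
dist.scatter(out, scatter_list=results, src=0)
print(rank, out)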
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase ( __a, __a=0.9_9_9, __a="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ = []
for i in range(__a ):
SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ), __a ) )
return torch.tensor(__a, dtype=torch.floataa )
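
# What betas_for_alpha_bar computes: given a cumulative schedule
# alpha_bar(t) = prod_i (1 - beta_i), it recovers the per-step betas as
#     beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i),
# clipped at max_beta. Tiny numeric illustration with 5 cosine steps:
_bars = [math.cos((t / 5 + 0.008) / 1.008 * math.pi / 2) ** 2 for t in range(6)]
_betas = [min(1 - _bars[i + 1] / _bars[i], 0.999) for i in range(5)]
assert _betas[-1] == 0.999  # the final step hits the max_beta clip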
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" correspond to the annotations in Table 2 of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps | 628 | 1 |
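
# Usage sketch: the two-stage step() above implements Heun's method as in
# diffusers' public HeunDiscreteScheduler, which can be swapped into any
# pipeline via from_config (the checkpoint id is illustrative):
from diffusers import DiffusionPipeline, HeunDiscreteScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = HeunDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe("an astronaut riding a horse", num_inference_steps=30).images[0]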
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class snake_case ( __lowercase ):
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
requires_backends(self , '''vision''' )
self.check_model_type(SCREAMING_SNAKE_CASE_ )
def __call__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return {}, {}, {}
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = image.size
SCREAMING_SNAKE_CASE_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework )
return model_inputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model(**SCREAMING_SNAKE_CASE_ )
return model_outputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE_ = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE_ )).astype('''uint8''' )
SCREAMING_SNAKE_CASE_ = Image.fromarray(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = predicted_depth
SCREAMING_SNAKE_CASE_ = depth
return output_dict | 628 |
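
# Usage sketch for the pipeline above. Intel/dpt-large is one compatible
# depth-estimation checkpoint; any model in MODEL_FOR_DEPTH_ESTIMATION_MAPPING
# works the same way.
from transformers import pipeline

depth = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
out["depth"].save("depth.png")       # 8-bit PIL image built in postprocess above
print(out["predicted_depth"].shape)  # raw torch tensor of per-pixel depth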
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
            len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc-token-related keys in the dictionary.
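
# The fixture pattern above, outside a test: a FAISS-indexed dataset with
# id/title/text/embeddings columns handed to RagRetriever through a
# CustomHFIndex. Tokenizers are elided here; any DPR question-encoder and BART
# tokenizer pair works, exactly as in the fixtures.
import faiss
import numpy as np
from datasets import Dataset

dim = 8
ds = Dataset.from_dict(
    {
        "id": ["0", "1"],
        "title": ["Foo", "Bar"],
        "text": ["foo", "bar"],
        "embeddings": [np.ones(dim), 2 * np.ones(dim)],
    }
)
ds.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
# retriever = RagRetriever(config, question_encoder_tokenizer=..., generator_tokenizer=...,
#                          index=CustomHFIndex(dim, ds))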
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowerCAmelCase__ = get_tests_dir('fixtures')
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = mock.Mock()
SCREAMING_SNAKE_CASE_ = 5_00
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = HTTPError
SCREAMING_SNAKE_CASE_ = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE_ ) as mock_head:
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )
def _lowercase (self ):
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@is_staging_test
class snake_case ( unittest.TestCase ):
@classmethod
def _lowercase (cls ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-image-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
except HTTPError:
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''test-image-processor''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ = CustomImageProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ )
image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
SCREAMING_SNAKE_CASE_ = AutoImageProcessor.from_pretrained(
f'{USER}/test-dynamic-image-processor' , trust_remote_code=SCREAMING_SNAKE_CASE_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' ) | 628 |
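
# The push/pull round-trip being tested, in plain form (requires a valid token
# from `huggingface-cli login`; the repo name is illustrative):
from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("test-image-processor")
reloaded = ViTImageProcessor.from_pretrained("your-username/test-image-processor")
assert processor.to_dict() == reloaded.to_dict()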
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 628 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCAmelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCAmelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
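# Split a text into consecutive n-word chunks (100 words by default), re-joined with the given separator.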
def _lowerCamelCase ( __a, __a=100, __a=" " ):
SCREAMING_SNAKE_CASE_ = text.split(__a )
return [character.join(text[i : i + n] ).strip() for i in range(0, len(__a ), __a )]
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = [], []
for title, text in zip(documents['''title'''], documents['''text'''] ):
if text is not None:
for passage in split_text(__a ):
titles.append(title if title is not None else '''''' )
texts.append(__a )
return {"title": titles, "text": texts}
def _lowerCamelCase ( __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = ctx_tokenizer(
documents['''title'''], documents['''text'''], truncation=__a, padding='''longest''', return_tensors='''pt''' )['''input_ids''']
SCREAMING_SNAKE_CASE_ = ctx_encoder(input_ids.to(device=__a ), return_dict=__a ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def _lowerCamelCase ( __a, __a, __a, ):
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE_ = load_dataset(
'''csv''', data_files=[rag_example_args.csv_path], split='''train''', delimiter='''\t''', column_names=['''title''', '''text'''] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE_ = dataset.map(__a, batched=__a, num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__a )
SCREAMING_SNAKE_CASE_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE_ = Features(
{'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE_ = dataset.map(
partial(__a, ctx_encoder=__a, ctx_tokenizer=__a ), batched=__a, batch_size=processing_args.batch_size, features=__a, )
# And finally save your dataset
SCREAMING_SNAKE_CASE_ = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset''' )
dataset.save_to_disk(__a )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE_ = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('''embeddings''', custom_index=__a )
# And save the index
SCREAMING_SNAKE_CASE_ = os.path.join(rag_example_args.output_dir, '''my_knowledge_dataset_hnsw_index.faiss''' )
dataset.get_index('''embeddings''' ).save(__a )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class snake_case :
UpperCAmelCase__ = field(
default=str(Path(__lowercase ).parent / '''test_run''' / '''dummy-kb''' / '''my_knowledge_dataset.csv''' ) , metadata={'''help''': '''Path to a tab-separated csv file with columns \'title\' and \'text\''''} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'''} , )
UpperCAmelCase__ = field(
default='''facebook/rag-sequence-nq''' , metadata={'''help''': '''The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''''} , )
UpperCAmelCase__ = field(
default='''facebook/dpr-ctx_encoder-multiset-base''' , metadata={
'''help''': (
'''The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'''
''' \'facebook/dpr-ctx_encoder-multiset-base\''''
)
} , )
UpperCAmelCase__ = field(
default=str(Path(__lowercase ).parent / '''test_run''' / '''dummy-kb''' ) , metadata={'''help''': '''Path to a directory where the dataset passages and the index will be saved'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(
default=__lowercase , metadata={
'''help''': '''The number of processes to use to split the documents into passages. Default is single process.'''
} , )
UpperCAmelCase__ = field(
default=16 , metadata={
'''help''': '''The batch size to use when computing the passages embeddings using the DPR context encoder.'''
} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(
default=768 , metadata={'''help''': '''The dimension of the embeddings to pass to the HNSW Faiss index.'''} , )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The number of bi-directional links created for every new element during the HNSW index construction.'''
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCAmelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 628 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = 1
for i in range(1, __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
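# Editor's sketch (illustrative names, not from the source): the loop above
# applies the Catalan recurrence C(n) = C(n-1) * (4n - 2) / (n + 1), so for
# number >= 1 the function returns the (number - 1)-th Catalan number.
def _catalan_sketch(number: int) -> int:
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)  # exact: the recurrence stays integral
    return current

assert [_catalan_sketch(n) for n in range(1, 7)] == [1, 1, 2, 5, 14, 42]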
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = 'cpu'
lowerCAmelCase__ = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'
lowerCAmelCase__ = 'path-to-your-trained-model'
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
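# Editor's note: the dummy tensors below mirror a typical 512x512 Stable Diffusion v1
# UNet call (latents 2x4x64x64, a timestep, CLIP text embeddings 2x77x768), giving
# ipex a sample input to prepare/trace the model ahead of real inference.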
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'generator': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('generated.png') | 628 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__a ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE_ = i + 1
else:
SCREAMING_SNAKE_CASE_ = j - 1
return []
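# Editor's sketch (assumes `nums` is sorted ascending, names illustrative): the
# intended pointer movement for the scan above; on unsorted input the
# move-the-endpoint argument does not hold.
def _two_pointer_sketch(nums: list[int], target: int) -> list[int]:
    i, j = 0, len(nums) - 1
    while i < j:
        s = nums[i] + nums[j]
        if s == target:
            return [i, j]
        if s < target:
            i += 1  # sum too small: advance the left pointer
        else:
            j -= 1  # sum too large: retreat the right pointer
    return []

assert _two_pointer_sketch([2, 7, 11, 15], 9) == [0, 1]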
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''') | 628 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def _lowerCamelCase ( __a, __a, __a, __a, __a ):
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(__a ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1, node_index * 2, __a, __a, __a ), minimax(depth + 1, node_index * 2 + 1, __a, __a, __a ), )
return min(
minimax(depth + 1, node_index * 2, __a, __a, __a ), minimax(depth + 1, node_index * 2 + 1, __a, __a, __a ), )
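# Editor's note: the tree is assumed complete, so len(scores) must be a power of
# two for height = log2(len(scores)) to describe an integral leaf depth.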
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = [90, 23, 6, 33, 21, 65, 123, 34_423]
SCREAMING_SNAKE_CASE_ = math.log(len(__a ), 2 )
print('''Optimal value : ''', end='''''' )
print(minimax(0, 0, __a, __a, __a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 628 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
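# Build small train/valid dataloaders over a noisy linear relation y = a * x + b.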
def _lowerCamelCase ( __a=2, __a=3, __a=16, __a = 10, __a = 2 ):
def get_dataset(__a ):
SCREAMING_SNAKE_CASE_ = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__a, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
return (train_dataloader, valid_dataloader)
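# Minimal training loop; it returns the random draws made after each step so the
# tests can check that RNG state is reproduced across checkpoint save/load.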
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=None ):
SCREAMING_SNAKE_CASE_ = []
for epoch in range(__a ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = batch
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(__a, __a )
accelerator.backward(__a )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class snake_case ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
            # Save 11 states; with total_limit=2 only the last two checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 628 | 1 |
"""simple docstring"""
import torch
from transformers import AutoModel
class snake_case ( torch.nn.Module ):
def __init__(self , SCREAMING_SNAKE_CASE_="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
SCREAMING_SNAKE_CASE_ = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.nn.CosineSimilarity(3 , 1e-0_8 )
SCREAMING_SNAKE_CASE_ = torch.nn.Softmax(dim=1 )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self.bert(**SCREAMING_SNAKE_CASE_ ).last_hidden_state
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return token_embeddings.sum(2 , keepdim=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = W_supports['''sizes'''].tolist()
SCREAMING_SNAKE_CASE_ = W_supports['''start_token_id'''].item()
SCREAMING_SNAKE_CASE_ = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
SCREAMING_SNAKE_CASE_ = self.BERT(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.BERT(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = W_supports['''input_ids'''] == start_token_id
SCREAMING_SNAKE_CASE_ = W_supports['''input_ids'''] == end_token_id
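        # Score each query token as an entity start/end candidate by similarity to the support set's start/end marker positions (softmax over the query tokens).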
for i, size in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
SCREAMING_SNAKE_CASE_ = 0
else:
SCREAMING_SNAKE_CASE_ = support_sizes[i - 1]
SCREAMING_SNAKE_CASE_ = S[s : s + size][start_token_masks[s : s + size]]
SCREAMING_SNAKE_CASE_ = S[s : s + size][end_token_masks[s : s + size]]
SCREAMING_SNAKE_CASE_ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
SCREAMING_SNAKE_CASE_ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
SCREAMING_SNAKE_CASE_ = torch.vstack((p_starts, p_start) )
SCREAMING_SNAKE_CASE_ = torch.vstack((p_ends, p_end) )
else:
SCREAMING_SNAKE_CASE_ = p_start
SCREAMING_SNAKE_CASE_ = p_end
return p_starts, p_ends | 628 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
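        # Flatten the (3, batch) stack of samples/timesteps so the model and batch_step_no_noise process all three trajectories in one batched call.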
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SMALL_MODEL_IDENTIFIER
SCREAMING_SNAKE_CASE_ = '''pt'''
SCREAMING_SNAKE_CASE_ = '''tf'''
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFAutoModel.from_pretrained(self.test_model , from_pt=SCREAMING_SNAKE_CASE_ )
model_tf.save_pretrained(SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''mock_framework'''
# Framework provided - return whatever the user provides
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(self.test_model , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
SCREAMING_SNAKE_CASE_ = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_tf )
# Both in environment -> use PyTorch
SCREAMING_SNAKE_CASE_ = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ), patch(
'''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(SCREAMING_SNAKE_CASE_ , self.framework_pt )
# Both not in environment -> raise error
SCREAMING_SNAKE_CASE_ = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = MagicMock(return_value=SCREAMING_SNAKE_CASE_ )
with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE_ ), patch(
'''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE_ ):
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = FeaturesManager.determine_framework(self.test_model ) | 628 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = TransfoXLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
        # Check that the moved token is not duplicated
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
        # Check that the token is moved to the specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' ) | 628 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__a, max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
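# Editor's note (Project Euler 39): with max_perimeter = 1000 the most common
# perimeter, and hence the expected printed answer, is 840.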
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 628 | 1 |
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''mask2former'''
UpperCAmelCase__ = ['''swin''']
UpperCAmelCase__ = {'''hidden_size''': '''hidden_dim'''}
def __init__(self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 10_24 , SCREAMING_SNAKE_CASE_ = "relu" , SCREAMING_SNAKE_CASE_ = 6 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 8 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 20_48 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 4 , SCREAMING_SNAKE_CASE_ = 2_55 , SCREAMING_SNAKE_CASE_ = 1_00 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 2.0 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 1_25_44 , SCREAMING_SNAKE_CASE_ = 3.0 , SCREAMING_SNAKE_CASE_ = 0.75 , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
SCREAMING_SNAKE_CASE_ = hidden_dim
SCREAMING_SNAKE_CASE_ = encoder_feedforward_dim
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = dim_feedforward
SCREAMING_SNAKE_CASE_ = pre_norm
SCREAMING_SNAKE_CASE_ = enforce_input_projection
SCREAMING_SNAKE_CASE_ = common_stride
SCREAMING_SNAKE_CASE_ = ignore_value
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = class_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = train_num_points
SCREAMING_SNAKE_CASE_ = oversample_ratio
SCREAMING_SNAKE_CASE_ = importance_sample_ratio
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = feature_strides
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
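# True when the model has been wrapped by torch.compile (only possible on torch >= 2.0).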
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
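# Unwrap DistributedDataParallel/DataParallel (and DeepSpeed/torch.compile) wrappers to recover the underlying model.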
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
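# Context manager that patches environment variables (stringified) for the duration
# of the block; keys are upper-cased and removed again on exit.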
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
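# Recursively merge `source` into `destination`, descending into nested dicts.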
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
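# Check whether a local TCP port is already bound; 29500 is torch.distributed's default master port.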
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 628 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '▁'
lowerCAmelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
lowerCAmelCase__ = {
'google/pegasus-xsum': 512,
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = PegasusTokenizer
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<mask_2>" , SCREAMING_SNAKE_CASE_="<mask_1>" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1_03 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = offset
if additional_special_tokens is not None:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError(
f'additional_special_tokens should be of type {type(SCREAMING_SNAKE_CASE_ )}, but is'
f' {type(SCREAMING_SNAKE_CASE_ )}' )
SCREAMING_SNAKE_CASE_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
            # fill the remaining slots with <unk_x> placeholder tokens in case not all additional tokens are already provided
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(SCREAMING_SNAKE_CASE_ ) , self.offset - 1 )
]
if len(set(SCREAMING_SNAKE_CASE_ ) ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
SCREAMING_SNAKE_CASE_ = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 , self.offset )]
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , mask_token_sent=SCREAMING_SNAKE_CASE_ , offset=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = False if not self.vocab_file else True
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'''There should be 3 special tokens: mask_token, pad_token, and eos_token +'''
f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(SCREAMING_SNAKE_CASE_ )
elif token_ids_a is None:
return self._special_token_mask(SCREAMING_SNAKE_CASE_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,) | 628 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
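# Worked illustration (added; it restates the fixture above rather than new behavior):
# with the tiny vocab and the merges "a p", "ap t</w>", "r e", "a d", "ad apt</w>"
# written in setUp, BPE can only fuse those pairs, so:
#   tokenizer.tokenize("adapt react readapt apt")
#   # -> ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']
# Non-final subword pieces carry the "@@" continuation marker from the vocab.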
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class snake_case ( __lowercase ):
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def _lowercase (self , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
if top_k is not None:
            SCREAMING_SNAKE_CASE_['''top_k'''] = top_k  # record top_k inside the postprocess params instead of overwriting the dict
return {}, {}, postprocess_params
def __call__(self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework )
return model_inputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model(**SCREAMING_SNAKE_CASE_ )
return model_outputs
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE_ = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ = model_outputs.logits.softmax(-1 )[0]
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = probs.topk(SCREAMING_SNAKE_CASE_ )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE_ = stable_softmax(model_outputs.logits , axis=-1 )[0]
SCREAMING_SNAKE_CASE_ = tf.math.top_k(SCREAMING_SNAKE_CASE_ , k=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
SCREAMING_SNAKE_CASE_ = scores.tolist()
SCREAMING_SNAKE_CASE_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] | 628 |
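# Usage sketch (added for illustration; not part of the class above). The default
# checkpoint resolution and the image path are assumptions, not values from this file:
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification")  # assumed to resolve a default vision checkpoint
#   classifier("cat.jpg", top_k=3)                 # "cat.jpg" is a placeholder path
#   # -> a list of {"score": ..., "label": ...} dicts, as assembled in postprocess above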
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = BigBirdConfig.from_json_file(__a )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE_ = BigBirdForQuestionAnswering(__a )
else:
SCREAMING_SNAKE_CASE_ = BigBirdForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__a, __a, is_trivia_qa=__a )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
            'The config json file corresponding to the pre-trained BigBird model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 628 | 1 |
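# Example invocation (added; the script filename and every path below are placeholders,
# not artifacts shipped with this snippet):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/bigbird/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --is_trivia_qa  # pass only when the checkpoint carries a TriviaQA head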
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowerCAmelCase__ = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class snake_case ( unittest.TestCase ):
UpperCAmelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCAmelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCAmelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}] )
SCREAMING_SNAKE_CASE_ = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
] , )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
# Legacy behavior
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}]] )
SCREAMING_SNAKE_CASE_ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}],
] , )
SCREAMING_SNAKE_CASE_ = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [
{'''label''': '''LABEL_0''', '''score''': 0.5_04},
{'''label''': '''LABEL_0''', '''score''': 0.5_04},
] , )
@require_torch
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
@require_tf
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] )
@slow
@require_torch
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline('''text-classification''' )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
SCREAMING_SNAKE_CASE_ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] )
@slow
@require_tf
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = pipeline('''text-classification''' , framework='''tf''' )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
SCREAMING_SNAKE_CASE_ = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
SCREAMING_SNAKE_CASE_ = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
SCREAMING_SNAKE_CASE_ = '''HuggingFace is in'''
SCREAMING_SNAKE_CASE_ = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
SCREAMING_SNAKE_CASE_ = ['''HuggingFace is in ''', '''Paris is in France''']
SCREAMING_SNAKE_CASE_ = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )}, {'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
SCREAMING_SNAKE_CASE_ = text_classifier(SCREAMING_SNAKE_CASE_ , top_k=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [[{'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )}] * N, [{'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )}] * N] , )
SCREAMING_SNAKE_CASE_ = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
SCREAMING_SNAKE_CASE_ = text_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
        # This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
SCREAMING_SNAKE_CASE_ = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
text_classifier(SCREAMING_SNAKE_CASE_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
SCREAMING_SNAKE_CASE_ = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , [{'''label''': ANY(SCREAMING_SNAKE_CASE_ ), '''score''': ANY(SCREAMING_SNAKE_CASE_ )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) | 628 |
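# Quick reference for the behaviors exercised above (descriptive summary, no new API):
#   text_classifier("...")              # -> [{"label": ..., "score": ...}], best label only
#   text_classifier("...", top_k=2)     # -> the two best labels per input
#   text_classifier("...", top_k=None)  # -> all labels, one list per input (non-legacy format)
#   return_all_scores=True/False        # -> legacy spelling of the same switch
#   {"text": ..., "text_pair": ...}     # -> the supported way to classify a sentence pair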
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''', FutureWarning, )
if isinstance(__a, torch.Tensor ):
return image
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(__a ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return image
def _lowerCamelCase ( __a ):
if isinstance(__a, torch.Tensor ):
return mask
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = mask[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(mask[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
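# Usage sketch (added; assumes `pipe` is an instance of the inpainting pipeline above,
# loaded from an unspecified UNet + RePaint-scheduler checkpoint; the file paths are
# placeholders and the keyword names are assumed from the upstream RePaint pipeline):
#
#   import PIL.Image
#   image = PIL.Image.open("original.png")
#   mask = PIL.Image.open("mask.png")  # thresholded at 0.5 by _preprocess_mask above
#   out = pipe(image=image, mask_image=mask, num_inference_steps=250,
#              jump_length=10, jump_n_sample=10, output_type="pil")
#   out.images[0].save("inpainted.png")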
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = RemBertConfig.from_json_file(__a )
print('''Building PyTorch model from configuration: {}'''.format(str(__a ) ) )
SCREAMING_SNAKE_CASE_ = RemBertModel(__a )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__a, __a, __a )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(__a ) )
torch.save(model.state_dict(), __a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 628 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 628 | 1 |
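# Example launch (added; every value below is a placeholder — valid --task_name choices
# come from the `processors` registry imported above, and "swag" is only assumed to be one):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data --output_dir ./out \
#       --do_train --do_eval --max_seq_length 128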
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case ( __lowercase ):
def __init__(self ):
"""simple docstring"""
self.test()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE_ = self.advance()
if not self.does_advance(SCREAMING_SNAKE_CASE_ ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.update(SCREAMING_SNAKE_CASE_ )
counter += 1
if counter > 1_00_00:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def _lowercase (self ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def _lowercase (self ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def _lowercase (self ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def _lowercase (self , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
SCREAMING_SNAKE_CASE_ = token_ids
SCREAMING_SNAKE_CASE_ = len(self.token_ids )
SCREAMING_SNAKE_CASE_ = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE_ = False
def _lowercase (self ):
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}' )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
if self.does_advance(SCREAMING_SNAKE_CASE_ ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE_ = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE_ = True
self.reset()
return stepped, completed, reset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = 0
def _lowercase (self ):
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def _lowercase (self , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE_ = self.seqlen
SCREAMING_SNAKE_CASE_ = self.fulfilled_idx
SCREAMING_SNAKE_CASE_ = self.completed
return new_constraint
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ):
"""simple docstring"""
        SCREAMING_SNAKE_CASE_ = max([len(one ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE_ = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE_ = root
for tidx, token_id in enumerate(SCREAMING_SNAKE_CASE_ ):
if token_id not in level:
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = level[token_id]
if no_subsets and self.has_subsets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f' {nested_token_ids}.' )
SCREAMING_SNAKE_CASE_ = root
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE_ = start[current_token]
SCREAMING_SNAKE_CASE_ = list(start.keys() )
return next_tokens
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.next_tokens(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 0
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = list(root.values() )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return 1
else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.count_leaves(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) != leaf_count
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for token_ids in nested_token_ids ):
raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
SCREAMING_SNAKE_CASE_ = DisjunctiveTrie(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nested_token_ids
SCREAMING_SNAKE_CASE_ = self.trie.max_height
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = False
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.trie.next_tokens(self.current_seq )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
else:
return token_list
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}' )
SCREAMING_SNAKE_CASE_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}' )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
if self.does_advance(SCREAMING_SNAKE_CASE_ ):
self.current_seq.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = True
else:
SCREAMING_SNAKE_CASE_ = True
self.reset()
SCREAMING_SNAKE_CASE_ = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE_ = completed
return stepped, completed, reset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = []
def _lowercase (self ):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def _lowercase (self , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE_ = self.seqlen
SCREAMING_SNAKE_CASE_ = self.current_seq
SCREAMING_SNAKE_CASE_ = self.completed
return new_constraint
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE_ = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = False
self.init_state()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = [constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) for constraint in self.constraints]
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE_ = constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.append(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.extend(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = self.inprogress_constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.append(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.extend(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
else:
return token_list
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.add(SCREAMING_SNAKE_CASE_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False, False
if self.completed:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.inprogress_constraint.update(SCREAMING_SNAKE_CASE_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE_ = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE_ = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pending_constraint.update(SCREAMING_SNAKE_CASE_ )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = None
if not complete and stepped:
SCREAMING_SNAKE_CASE_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
SCREAMING_SNAKE_CASE_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def _lowercase (self , SCREAMING_SNAKE_CASE_=True ):
"""simple docstring"""
        SCREAMING_SNAKE_CASE_ = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints
        # objects throughout this process, so they remain in their initialization state.
if stateful:
SCREAMING_SNAKE_CASE_ = [
constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE_ = self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [constraint.copy() for constraint in self.pending_constraints]
return new_state | 628 |
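# Usage sketch (added): upstream, these classes power constrained beam search through
# `model.generate(constraints=[...])`. The model/tokenizer names below are assumptions:
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#   tok = AutoTokenizer.from_pretrained("t5-small")           # assumed checkpoint
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   must_contain = PhrasalConstraint(tok("rainy day", add_special_tokens=False).input_ids)
#   ids = model.generate(**tok("weather: cloudy, showers", return_tensors="pt"),
#                        constraints=[must_contain], num_beams=4)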
"""simple docstring"""
import cva
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
        if 0.04 <= k <= 0.06:  # the Harris sensitivity constant is conventionally chosen in [0.04, 0.06]
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = np.gradient(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
        SCREAMING_SNAKE_CASE_ = self.k  # use the k validated in __init__ rather than a hardcoded 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
                # corner-response threshold; 0.5 is an arbitrary cutoff, tune per image
                if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 628 | 1 |
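# Background note (added): the per-pixel score above is the Harris corner response
#   R = det(M) - k * trace(M) ** 2,  with M = [[Wxx, Wxy], [Wxy, Wyy]]
# where Wxx, Wyy, Wxy are the windowed sums of the gradient products computed above.
# Large positive R (both eigenvalues of M large) marks a corner, large negative R an
# edge, and |R| near zero a flat region.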
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Physical constant: speed of light (m/s)
lowerCAmelCase__ = 299792458
# Symbols
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = symbols('ct x y z')
def _lowerCamelCase ( __a ):
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def _lowerCamelCase ( __a ):
return 1 / sqrt(1 - beta(__a ) ** 2 )
def _lowerCamelCase ( __a ):
return np.array(
[
[gamma(__a ), -gamma(__a ) * beta(__a ), 0, 0],
[-gamma(__a ) * beta(__a ), gamma(__a ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def _lowerCamelCase ( __a, __a = None ):
# Ensure event is not empty
if event is None:
SCREAMING_SNAKE_CASE_ = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__a ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCAmelCase__ = transform(29979245)
print('Example of four vector: ')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
lowerCAmelCase__ = {ct: c, x: 1, y: 1, z: 1}
lowerCAmelCase__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''') | 628 |
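# Worked check (added, illustrative numbers): at v = 0.5 * c,
#   beta(0.5 * c)  == 0.5
#   gamma(0.5 * c) == 1 / sqrt(1 - 0.25) ~= 1.1547
# so transformation_matrix(0.5 * c) mixes ct and x with coefficients gamma and
# -gamma * beta, while leaving the transverse coordinates y and z unchanged.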
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = '''swin'''
UpperCAmelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__(self , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=96 , SCREAMING_SNAKE_CASE_=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=4.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = embed_dim
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_heads
SCREAMING_SNAKE_CASE_ = window_size
SCREAMING_SNAKE_CASE_ = mlp_ratio
SCREAMING_SNAKE_CASE_ = qkv_bias
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = use_absolute_embeddings
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
SCREAMING_SNAKE_CASE_ = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
class snake_case ( __lowercase ):
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def _lowercase (self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase (self ):
"""simple docstring"""
return 1e-4 | 628 |
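# Usage sketch (added; "SwinConfig" is the upstream name assumed for the config class
# above — the derived values follow directly from the defaults in __init__):
#
#   cfg = SwinConfig()
#   cfg.hidden_size   # int(96 * 2 ** (4 - 1)) == 768
#   cfg.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']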
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return x.sum()
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return i + 1
@dataclass
class snake_case :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = 2
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2}
        expected_map_nested_sna_int = {
'''a''': np.eye(2 ).astype(SCREAMING_SNAKE_CASE_ ),
'''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ),
'''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ),
}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(ValueError ):  # can't pickle a local lambda
            map_nested(lambda x : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
        d1 = {'''a''': 1, '''b''': 2}
        d2 = {'''a''': 3, '''b''': 4}
        d3 = {'''a''': 5, '''b''': 6}
        expected_zip_dict_result = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1, d2, d3 ) ) , expected_zip_dict_result )
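        # zip_dict pairs values across its input dicts by shared key: for the
        # three dicts above it yields ('a', (1, 3, 5)) and ('b', (2, 4, 6)).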
def _lowercase (self ):
"""simple docstring"""
        class Foo :
            my_attr = '''bar'''
        foo = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(SCREAMING_SNAKE_CASE_ , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
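        # temporary_assignment swaps in the new attribute value on entering the
        # context and restores the original on exit, as the three assertions
        # above verify in order.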
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc ( iterable_length, num_proc, expected_num_proc ):
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(iterable_length )}
        SCREAMING_SNAKE_CASE_ = map_nested(lambda x : x + 10, SCREAMING_SNAKE_CASE_, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
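# The parametrization above encodes the dispatch rule under test: with
# parallel_min_length=16, map_nested stays single-process for shorter
# iterables and otherwise hands min(num_proc, len(iterable)) to the
# multiprocessing Pool.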
class snake_case ( __lowercase ):
@require_tf
def _lowercase (self ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
SCREAMING_SNAKE_CASE_ = layers.Dense(2 )
def gen_random_output():
SCREAMING_SNAKE_CASE_ = tf.random.uniform((1, 3) )
return model(SCREAMING_SNAKE_CASE_ ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def _lowercase (self ):
"""simple docstring"""
import torch
def gen_random_output():
SCREAMING_SNAKE_CASE_ = torch.nn.Linear(3 , 2 )
SCREAMING_SNAKE_CASE_ = torch.rand(1 , 3 )
return model(SCREAMING_SNAKE_CASE_ ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def _lowercase (self ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
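    # Shared pattern of the three seed tests above: two generations inside
    # temp_seed(42) must match exactly, while a third call outside the context
    # must produce a different value, confirming the seeding is scoped to the
    # context manager.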
@pytest.mark.parametrize('''input_data''', [{}] )
def test_nested_data_structure_data ( input_data ):
    SCREAMING_SNAKE_CASE_ = NestedDataStructure(input_data ).data
    assert SCREAMING_SNAKE_CASE_ == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def test_flatten ( data, expected_output ):
    SCREAMING_SNAKE_CASE_ = NestedDataStructure(data ).flatten()
    assert SCREAMING_SNAKE_CASE_ == expected_output
def test_asdict ( ):
    input = A(x=1, y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input ) == expected_output
    input = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y='''foo''' )] )
def _split_text ( text ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered ( ):
with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
        assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
        assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(__a ) == 4 | 628 | 1 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '__DUMMY_TRANSFORMERS_USER__'
CI_HUB_USER_FULL_NAME = 'Dummy User'
CI_HUB_USER_TOKEN = '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'
CI_HUB_ENDPOINT = 'https://hub-ci.huggingface.co'
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
CI_HUB_TOKEN_PATH = Path('~/.huggingface/hub_ci_token').expanduser()
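# Everything below targets the hub-ci staging endpoint defined above; the
# token is the dummy CI credential for that staging hub rather than a real
# user secret.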
@pytest.fixture
def ci_hfh_hf_hub_url ( monkeypatch ):
    monkeypatch.setattr(
        '''huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE''', CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE )
@pytest.fixture
def ci_hub_config ( monkeypatch ):
    monkeypatch.setattr('''datasets.config.HF_ENDPOINT''', CI_HUB_ENDPOINT )
    monkeypatch.setattr('''datasets.config.HUB_DATASETS_URL''', CI_HUB_DATASETS_URL )
@pytest.fixture
def set_ci_hub_token_path ( monkeypatch ):
    monkeypatch.setattr('''huggingface_hub.hf_api.HfFolder.path_token''', CI_HUB_TOKEN_PATH )
@pytest.fixture
def set_ci_hub_access_token ( ci_hub_config, set_ci_hub_token_path ):
    HfFolder.save_token(CI_HUB_USER_TOKEN )
yield
HfFolder.delete_token()
@pytest.fixture(scope='''session''' )
def hf_api ( ):
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope='''session''' )
def hf_token ( hf_api ):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def cleanup_repo ( hf_api ):
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type='''dataset''' )
return _cleanup_repo
@pytest.fixture
def temporary_repo ( cleanup_repo ):
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
return _temporary_repo
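# temporary_repo turns a repo id into a context manager so the repo is deleted
# through cleanup_repo even when the test body raises; the session-scoped data
# repo fixtures below give the same guarantee by deleting the repo after their
# yield and swallowing already-deleted or invalid-token errors.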
@pytest.fixture(scope='''session''' )
def hf_private_dataset_repo_txt_data_ ( hf_api, hf_token, text_file ):
    repo_name = F'repo_txt_data-{int(time.time() * 10E3 )}'
    repo_id = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(repo_id, token=hf_token, repo_type='''dataset''', private=True )
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file ), path_in_repo='''data/text_data.txt''', repo_id=repo_id, repo_type='''dataset''', )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data ( hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url ):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='''session''' )
def hf_private_dataset_repo_zipped_txt_data_ ( hf_api, hf_token, zip_csv_with_dir_path ):
    repo_name = F'repo_zipped_txt_data-{int(time.time() * 10E3 )}'
    repo_id = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(repo_id, token=hf_token, repo_type='''dataset''', private=True )
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path ), path_in_repo='''data.zip''', repo_id=repo_id, repo_type='''dataset''', )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data ( hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url ):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='''session''' )
def hf_private_dataset_repo_zipped_img_data_ ( hf_api, hf_token, zip_image_path ):
    repo_name = F'repo_zipped_img_data-{int(time.time() * 10E3 )}'
    repo_id = F'{CI_HUB_USER}/{repo_name}'
    hf_api.create_repo(repo_id, token=hf_token, repo_type='''dataset''', private=True )
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path ), path_in_repo='''data.zip''', repo_id=repo_id, repo_type='''dataset''', )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type='''dataset''' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data ( hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url ):
return hf_private_dataset_repo_zipped_img_data_ | 628 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( __lowercase , __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CycleDiffusionPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , num_train_timesteps=10_00 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
SCREAMING_SNAKE_CASE_ = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = image / 2 + 0.5
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
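    # CycleDiffusion takes two prompts: 'source_prompt' describes the input
    # image and 'prompt' the desired output, each with its own guidance scale,
    # as exercised by the dummy inputs above.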
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(SCREAMING_SNAKE_CASE_ , '''half''' ):
SCREAMING_SNAKE_CASE_ = module.half()
SCREAMING_SNAKE_CASE_ = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def _lowercase (self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _lowercase (self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = init_image.resize((5_12, 5_12) )
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE_ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE_ = CycleDiffusionPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = '''A black colored car'''
SCREAMING_SNAKE_CASE_ = '''A blue colored car'''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
SCREAMING_SNAKE_CASE_ = init_image.resize((5_12, 5_12) )
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE_ = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE_ = CycleDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = '''A black colored car'''
SCREAMING_SNAKE_CASE_ = '''A blue colored car'''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images
assert np.abs(image - expected_image ).max() < 2e-2 | 628 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE_ = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE_ = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE_ = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE_ = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE_ = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE_ = 352
# set label information
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE_ = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
    SCREAMING_SNAKE_CASE_ = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = FocalNetConfig(
embed_dim=__a, depths=__a, focal_levels=__a, focal_windows=__a, use_conv_embed=__a, idalabel=__a, labelaid=__a, use_post_layernorm=__a, use_layerscale=__a, )
return config
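# For example, per the branches above, 'focalnet-tiny' yields depths
# [2, 2, 6, 2] with embed_dim 96 and the ImageNet-1k label set, while any
# 'large'/'huge' variant switches on conv embeddings, post-layernorm and
# layerscale and uses the ImageNet-22k labels.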
def _lowerCamelCase ( __a ):
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE_ = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''encoder.layers''', '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''downsample.proj''', '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.f''', '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.h''', '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.proj''', '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE_ = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE_ = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head''', '''classifier''' )
else:
SCREAMING_SNAKE_CASE_ = '''focalnet.''' + name
return name
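# Example of the renaming above, traced through the branches:
#   'layers.0.blocks.1.modulation.f.weight'
#     -> 'focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight'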
def _lowerCamelCase ( __a, __a, __a=False ):
# fmt: off
SCREAMING_SNAKE_CASE_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''', __a )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__a, map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = get_focalnet_config(__a )
SCREAMING_SNAKE_CASE_ = FocalNetForImageClassification(__a )
model.eval()
# load state dict
model.load_state_dict(__a )
# verify conversion
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    SCREAMING_SNAKE_CASE_ = BitImageProcessor(
        do_resize=True, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
SCREAMING_SNAKE_CASE_ = processor(images=__a, return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
SCREAMING_SNAKE_CASE_ = image_transforms(__a ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values, __a, atol=1E-4 )
SCREAMING_SNAKE_CASE_ = model(**__a )
SCREAMING_SNAKE_CASE_ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''', outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 1 |
"""simple docstring"""
def combination_sum_iv ( n, array, target ):
    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array ( n, array, target ):
    def count_of_possible_combinations_with_dp_array(
        target, dp_array ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array )
def combination_sum_iv_bottom_up ( n, array, target ):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1 ):
        for j in range(n ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
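# The three implementations above trade clarity for speed: the plain recursion
# enumerates every ordered combination, the memoized variant caches one count
# per sub-target, and the bottom-up table runs in O(n * target) time with
# O(target) space. For example:
#   >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
#   9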
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 628 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
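    # The encoder hyperparameters below are per-stage lists (depths, strides,
    # SR ratios, hidden sizes and so on), one entry per encoder block, while
    # decoder_hidden_size, max_depth and head_in_index configure the monocular
    # depth-estimation decoder.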
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 628 | 1 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
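# MidiProcessor additionally requires the optional note_seq package, so it is
# gated behind its own availability check below; when any dependency is
# missing, dummy placeholder objects are imported instead so that top-level
# imports still succeed.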
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 628 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
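    # The decoder pipeline needs only these three parts: the hint-conditioned
    # UNet, the DDIM scheduler and the MoVQ image decoder. Text encoding
    # happens upstream in the separate prior pipeline, which supplies the
    # image_embeds / negative_image_embeds consumed here.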
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 | 1 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=10_00 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_input_mask
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = range_bbox
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE_ = bbox[i, j, 3]
SCREAMING_SNAKE_CASE_ = bbox[i, j, 1]
SCREAMING_SNAKE_CASE_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE_ = bbox[i, j, 2]
SCREAMING_SNAKE_CASE_ = bbox[i, j, 0]
SCREAMING_SNAKE_CASE_ = t
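        # The conditional swaps in the loop above reorder each random box so
        # that coordinate 2 >= coordinate 0 and coordinate 3 >= coordinate 1,
        # i.e. every (x0, y0, x1, y1) bbox is well-formed.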
SCREAMING_SNAKE_CASE_ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _lowercase (self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LiltModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(
SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase (self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
UpperCAmelCase__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return True
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LiltModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def _lowercase (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase (self ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(input_ids=SCREAMING_SNAKE_CASE_ , bbox=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.Size([1, 2, 7_68] )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=SCREAMING_SNAKE_CASE_ , )
self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) ) | 628 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
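    # When has_pre_transformation is set, forward() projects the second-to-last
    # hidden state through the extra LayerNorm and linear layer created above,
    # instead of projecting the final hidden state; that is also why it forces
    # output_hidden_states=True in that mode.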
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 | 1 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.9_9_9, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas, dtype=torch.float32 )
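# Minimal usage sketch for the helper above (illustrative only):
#     betas = betas_for_alpha_bar(10)  # cosine schedule by default
#     assert betas.shape == (10,)
#     assert bool((betas > 0).all()) and bool((betas <= 0.9_9_9).all())
# Every beta is clipped at `max_beta`, so the schedule never reaches 1.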
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
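    # Note on the scaling above (interpretation): dividing by (sigma**2 + 1) ** 0.5
    # matches the Karras-style input preconditioning, so the model sees inputs
    # with roughly unit variance at every noise level.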
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
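    # Illustrative round trip for the interpolation above: with
    # log_sigmas = np.log([10.0, 1.0, 0.1]), calling _sigma_to_t(np.array(1.0),
    # log_sigmas) lands exactly on the middle grid point and returns 1.0, while
    # sigmas between grid points map to fractional indices through the linear
    # blend of low_idx and high_idx.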
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
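    # Worked example for the rho interpolation above (illustrative): with
    # sigma_max=10.0, sigma_min=0.1, rho=7.0 and num_inference_steps=3, ramp is
    # [0.0, 0.5, 1.0]; the endpoints come out exactly as sigmas[0] == 10.0 and
    # sigmas[-1] == 0.1, while the middle value is about 1.45, far below the
    # linear midpoint 5.05, because interpolation happens in sigma**(1/rho) space.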
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps | 628 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
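    # The two rows above are built so retrieval is easy to verify (illustrative
    # note): a query of all ones has its largest inner product with the all-twos
    # embedding (doc "1"), and a query of all minus-ones scores highest against
    # the all-ones embedding (doc "0"), which is what the retrieval tests below
    # assert.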
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
            len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary. | 628 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''shortest_edge''': 2_56}
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
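    # Example of the shortest-edge convention above (illustrative): with
    # size={"shortest_edge": 256} and default_to_square=False, a 400x600 image
    # resizes to 256x384: the short side becomes 256 and the aspect ratio is kept.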
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ ) | 628 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
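# Worked example matching the demo below (illustrative): for [2, 1, 5, 3, 4]
# with fn=operator.add, query_range(1, 3) combines the nodes covering [1, 2] and
# [3, 3]; after update(1, 5) the collection behaves like [2, 5, 5, 3, 4], so the
# demo prints 13 for query_range(1, 3) (5 + 5 + 3).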
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 628 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_model_doc_toc( model_doc ):
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
    # Sort
    return sorted(new_doc, key=lambda s : s["title"].lower() )
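# Illustrative behaviour of the helper above: given
#     [{"local": "bert", "title": "BERT"}, {"local": "bert", "title": "BERT"},
#      {"local": "albert", "title": "ALBERT"}]
# the duplicate "bert" entry collapses to a single one and the result comes back
# sorted by title:
#     [{"local": "albert", "title": "ALBERT"}, {"local": "bert", "title": "BERT"}]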
def check_model_doc( overwrite=False ):
    with open(PATH_TO_TOC, encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc['''sections'''] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite) | 628 |
"""simple docstring"""
def _lowerCamelCase ( number ):
    if not isinstance(number, int ):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        msg = F'Input value of [number={number}] must be > 0'
        raise ValueError(msg )
    current_number = 1
    for i in range(1, number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
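# Worked values for the recurrence above: starting from 1 and applying
# current_number = current_number * (4 * i - 2) // (i + 1) for i = 1 .. number-1
# gives f(1)=1, f(2)=1, f(3)=2, f(4)=5, f(5)=14, i.e. the Catalan numbers
# C_0..C_4, since C_n = C_(n-1) * (4n - 2) / (n + 1).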
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = replicate(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = shard(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
SCREAMING_SNAKE_CASE_ = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
SCREAMING_SNAKE_CASE_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_ = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_ = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''stabilityai/stable-diffusion-2'''
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = FlaxDPMSolverMultistepScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = FlaxStableDiffusionPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , revision='''bf16''' , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_ = scheduler_params
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE_ = jax.device_count()
SCREAMING_SNAKE_CASE_ = num_samples * [prompt]
SCREAMING_SNAKE_CASE_ = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = replicate(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = shard(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_ = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() )
SCREAMING_SNAKE_CASE_ = sd_pipe(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE_ )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
SCREAMING_SNAKE_CASE_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_ = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_ = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 628 |
"""simple docstring"""
from __future__ import annotations
def two_pointer ( nums, target ):
# Two pointers scan a list sorted in ascending order inward from both ends
i = 0
j = len(nums ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
i = i + 1
else:
j = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''') | 628 | 1 |
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
SCREAMING_SNAKE_CASE_ = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = f'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
SCREAMING_SNAKE_CASE_ = [sys.executable] + distributed_args
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) | 628 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
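# Synthetic y = a * x + b (plus noise) regression loaders for the checkpointing tests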
def dummy_dataloaders ( a=2, b=3, batch_size=16, n_train_batches = 10, n_valid_batches = 2 ):
def get_dataset(n_batches ):
x = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
train_dataset = get_dataset(n_train_batches )
valid_dataset = get_dataset(n_valid_batches )
# shuffle flags assumed to follow the upstream accelerate test: shuffle only the training split
train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4 )
valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4 )
return (train_dataloader, valid_dataloader)
def train ( num_epochs, model, dataloader, optimizer, accelerator, scheduler=None ):
rands = []
for epoch in range(num_epochs ):
# Train quickly
model.train()
for batch in dataloader:
x, y = batch
outputs = model(x )
loss = torch.nn.functional.mse_loss(outputs, y )
accelerator.backward(loss )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class DummyModel ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
self.a = nn.Parameter(torch.randn(1 ) )
self.b = nn.Parameter(torch.randn(1 ) )
def forward(self , x ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
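# train() steps the LR scheduler, so its state should now differ from the saved checkpoint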
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 11 states; with total_limit=2 only the two most recent checkpoints are kept:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 628 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
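# Submodule -> public names map; _LazyModule imports each submodule only on first attribute access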
_import_structure = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 628 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
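# Spot-check _get_variance at several (t, prev_t) pairs against precomputed reference values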
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 1 |