"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 718 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) | 628 | 0 |
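
# Hand-checked walkthrough of the toy BPE above (illustrative comment, not an
# original assertion): the usable merges, in priority order, are
# ("a","p") < ("ap","t</w>") < ("r","e") < ("a","d") < ("ad","apt</w>").
# "adapt" therefore collapses all the way to the single token "adapt" via
# a p -> ap t</w> -> a d -> ad apt</w>, while "react" only matches the
# ("r","e") merge and falls apart into the character-level pieces
# "re@@ a@@ c@@ t", exactly as asserted in test_full_tokenizer.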
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
SCREAMING_SNAKE_CASE_ = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
SCREAMING_SNAKE_CASE_ = {int(__a ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = '''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
SCREAMING_SNAKE_CASE_ = BitConfig(
conv_layer=__a, num_labels=1_000, idalabel=__a, labelaid=__a, )
return config
def _lowerCamelCase ( __a ):
if "stem.conv" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''stem.conv''', '''bit.embedder.convolution''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "head.fc" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head.fc''', '''classifier.1''' )
if name.startswith('''norm''' ):
SCREAMING_SNAKE_CASE_ = '''bit.''' + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE_ = '''bit.encoder.''' + name
return name
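
# Spot-checks of rename_key, hand-derived from the rules above:
#   "stem.conv.weight"               -> "bit.embedder.convolution.weight"
#   "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight"
#   "head.fc.weight"                 -> "classifier.1.weight"
#   "norm.weight"                    -> "bit.norm.weight"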
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
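
# Example invocation (script name and output path are hypothetical; the default
# model name comes from the argparse defaults above):
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub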
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = BigBirdConfig.from_json_file(__a )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE_ = BigBirdForQuestionAnswering(__a )
else:
SCREAMING_SNAKE_CASE_ = BigBirdForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__a, __a, is_trivia_qa=__a )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 628 | 0 |
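
# Example invocation (all paths and the script name are hypothetical):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird_ckpt \
#       --big_bird_config_file ./big_bird_config.json \
#       --pytorch_dump_path ./bigbird_pytorch \
#       --is_trivia_qa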
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''shortest_edge''': 2_56}
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE_ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ ) | 720 |
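
# Minimal usage sketch (the class name above is reconstructed, so treat this as
# illustrative rather than a published API; "cat.jpg" is a placeholder):
#
#   from PIL import Image
#   processor = SimpleImageProcessor()
#   batch = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#   batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])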
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''', __a, )
if isinstance(__a, torch.Tensor ):
return image
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(__a ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return image
def _lowerCamelCase ( __a ):
if isinstance(__a, torch.Tensor ):
return mask
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = mask[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(mask[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ ) | 628 | 0 |
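
# Minimal usage sketch, mirroring the RePaint example from the diffusers docs
# (the model ID and the input images are placeholders):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250,
#                 eta=0.0, jump_length=10, jump_n_sample=10)
#   inpainted = output.images[0]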
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word.

    A word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
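
# For example, get_pairs(("r", "e", "a", "c", "t</w>")) returns
# {("r", "e"), ("e", "a"), ("a", "c"), ("c", "t</w>")}.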
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
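
# Minimal usage sketch (file paths are hypothetical; "ctrl" also resolves via
# CTRLTokenizer.from_pretrained thanks to the pretrained map above):
#
#   tokenizer = CTRLTokenizer("vocab.json", "merges.txt")
#   tokens = tokenizer.tokenize("Diet is very important")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   tokenizer.convert_tokens_to_string(tokens)  # "@@ " continuation markers are stripped again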
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 628 | 0 |
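
# Example launch (paths hypothetical; valid task names come from `processors` in
# utils_multiple_choice, e.g. "swag"):
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path roberta-base \
#       --data_dir ./data/swag \
#       --output_dir ./swag_output \
#       --do_train --do_eval \
#       --max_seq_length 80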
"""simple docstring"""
from __future__ import annotations
import queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = data
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def _lowerCamelCase ( ):
print('''\n********Press N to stop entering at any point of time********\n''' )
SCREAMING_SNAKE_CASE_ = input('''Enter the value of the root node: ''' ).strip().lower()
SCREAMING_SNAKE_CASE_ = queue.Queue()
SCREAMING_SNAKE_CASE_ = TreeNode(int(__a ) )
q.put(__a )
while not q.empty():
SCREAMING_SNAKE_CASE_ = q.get()
SCREAMING_SNAKE_CASE_ = F'Enter the left node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ = left_node
q.put(__a )
SCREAMING_SNAKE_CASE_ = F'Enter the right node of {node_found.data}: '
SCREAMING_SNAKE_CASE_ = input(__a ).strip().lower() or '''n'''
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE_ = TreeNode(int(__a ) )
SCREAMING_SNAKE_CASE_ = right_node
q.put(__a )
raise
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ) or not node:
return
print(node.data, end=''',''' )
pre_order(node.left )
pre_order(node.right )
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ) or not node:
return
in_order(node.left )
print(node.data, end=''',''' )
in_order(node.right )
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data, end=''',''' )
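
# Worked example (hand-checked): for the tree
#         1
#        / \
#       2   3
#      / \
#     4   5
# pre_order prints 1,2,4,5,3,  in_order prints 4,2,5,1,3,  and
# post_order prints 4,5,2,3,1, each value followed by a trailing comma.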
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    # level order traversal that prints one tree level per line
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    # iterative pre-order traversal using an explicit stack
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    # iterative in-order traversal using an explicit stack
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    # iterative post-order traversal using two stacks
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
"""simple docstring"""
import cva
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = np.gradient(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
SCREAMING_SNAKE_CASE_ = 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img) | 628 | 0 |
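
# The response computed in detect() is the standard Harris measure: with the
# windowed structure tensor M = [[Wxx, Wxy], [Wxy, Wyy]],
#     R = det(M) - k * trace(M)**2 = (Wxx*Wyy - Wxy**2) - k * (Wxx + Wyy)**2,
# so a strongly positive R marks a corner, a strongly negative R an edge, and
# |R| near 0 a flat region. The fixed cutoff of 0.5 above is ad hoc; a common
# alternative is thresholding relative to the maximum response in the image.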
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) | 701 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 0 |
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a backbone and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
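
# Minimal usage sketch (relies only on the defaults defined above):
#
#   config = MaskFormerConfig()  # Swin-B backbone + DETR decoder fallbacks
#   assert config.backbone_config.model_type == "swin"
#   assert config.decoder_config.model_type == "detr"
#   assert config.mask_feature_size == 256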
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return x.sum()
def _lowerCamelCase ( __a ): # picklable for multiprocessing
return i + 1
@dataclass
class snake_case :
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = [1, 2]
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': [1, 2], '''b''': [3, 4]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 1}, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = [2, 3]
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': [2, 3], '''b''': [4, 5]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''1''': 2}, '''b''': 3}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = 2
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
SCREAMING_SNAKE_CASE_ = {'''a''': 2, '''b''': 0, '''c''': 2}
SCREAMING_SNAKE_CASE_ = {
'''a''': np.eye(2 ).astype(SCREAMING_SNAKE_CASE_ ),
'''b''': np.zeros(3 ).astype(SCREAMING_SNAKE_CASE_ ),
'''c''': np.ones(2 ).astype(SCREAMING_SNAKE_CASE_ ),
}
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , map_numpy=SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): # can't pickle a local lambda
map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 1 , SCREAMING_SNAKE_CASE_ , num_proc=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {'''a''': 1, '''b''': 2}
SCREAMING_SNAKE_CASE_ = {'''a''': 3, '''b''': 4}
SCREAMING_SNAKE_CASE_ = {'''a''': 5, '''b''': 6}
SCREAMING_SNAKE_CASE_ = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
class Foo :
my_attr = '''bar'''
foo = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(foo , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def _lowerCamelCase ( iterable_length, num_proc, expected_num_proc ):
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
SCREAMING_SNAKE_CASE_ = {F'{i}': i for i in range(iterable_length )}
SCREAMING_SNAKE_CASE_ = map_nested(lambda x : x + 10, SCREAMING_SNAKE_CASE_, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
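# [editor's note] A hedged sketch of the worker-count rule the parametrized
# cases above pin down (hypothetical helper, not the datasets implementation):
# below ``parallel_min_length`` everything stays single-process, otherwise
# ``num_proc`` is capped at the number of items.
def _sketch_effective_num_proc(iterable_length, num_proc, parallel_min_length=16):
    if num_proc is None:
        num_proc = 1
    if iterable_length < parallel_min_length:
        return 1
    return min(num_proc, iterable_length)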
class snake_case ( __lowercase ):
@require_tf
def _lowercase (self ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
model = layers.Dense(2 )
def gen_random_output():
x = tf.random.uniform((1, 3) )
return model(x ).numpy()
with temp_seed(42 , set_tensorflow=True ):
out1 = gen_random_output()
with temp_seed(42 , set_tensorflow=True ):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1 , out2 )
self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
def _lowercase (self ):
"""simple docstring"""
import torch
def gen_random_output():
model = torch.nn.Linear(3 , 2 )
x = torch.rand(1 , 3 )
return model(x ).detach().numpy()
with temp_seed(42 , set_pytorch=True ):
out1 = gen_random_output()
with temp_seed(42 , set_pytorch=True ):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1 , out2 )
self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
def _lowercase (self ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
out1 = gen_random_output()
with temp_seed(42 ):
out2 = gen_random_output()
out3 = gen_random_output()
np.testing.assert_equal(out1 , out2 )
self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
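# [editor's note] The three tests above treat ``temp_seed`` as a scope that
# seeds the RNG and restores its previous state on exit. A minimal, hedged
# sketch of that idea for NumPy only (the real helper also covers PyTorch
# and TensorFlow):
import contextlib
import numpy as np

@contextlib.contextmanager
def _sketch_temp_seed(seed):
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)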
@pytest.mark.parametrize('''input_data''', [{}] )
def _lowerCamelCase ( input_data ):
output_data = NestedDataStructure(input_data ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def _lowerCamelCase ( data, expected_output ):
output = NestedDataStructure(data ).flatten()
assert output == expected_output
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = A(x=1, y='''foobar''' )
SCREAMING_SNAKE_CASE_ = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(__a ) == expected_output
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
SCREAMING_SNAKE_CASE_ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(__a ) == expected_output
with pytest.raises(__a ):
asdict([1, A(x=10, y='''foo''' )] )
def _split_text ( text ):
return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _lowerCamelCase ( ):
with Pool(2 ) as pool:
out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(out ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(out ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
out = []
for yield_time, content in iflatmap_unordered(
pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(content )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(out ) == 4 | 628 | 0 |
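# [editor's note] For orientation, a minimal sketch of the recursive mapping
# behaviour that the ``map_nested`` tests above exercise (illustration only;
# the real function also handles ``num_proc``, ``map_numpy`` and more):
def _sketch_map_nested(fn, data):
    if isinstance(data, dict):
        return {k: _sketch_map_nested(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple, set)):
        return type(data)(_sketch_map_nested(fn, x) for x in data)
    return fn(data)

# e.g. _sketch_map_nested(lambda x: x + 1, {"a": [1, 2]}) == {"a": [2, 3]}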
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_maskformer_config ( model_name ):
backbone_config = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''', out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
config = MaskFormerConfig(backbone_config=backbone_config )
repo_id = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
config.num_labels = 847
filename = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
config.num_labels = 150
filename = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
config.num_labels = 171
filename = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
config.num_labels = 133
filename = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
config.num_labels = 19
filename = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
config.num_labels = 65
filename = '''mapillary-vistas-id2label.json'''
idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
return config
def create_rename_keys ( config ):
rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key ( dct, old, new ):
val = dct.pop(old )
dct[new] = val
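# [editor's note] Hedged usage sketch for ``rename_key`` above (made-up keys):
#
#     sd = {"backbone.patch_embed.proj.weight": 0}
#     rename_key(sd, "backbone.patch_embed.proj.weight", "embeddings.projection.weight")
#     assert "embeddings.projection.weight" in sd and len(sd) == 1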
def read_in_swin_q_k_v ( state_dict, backbone_config ):
num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
in_proj_bias = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[: dim]
state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim :, :]
state_dict[F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim :]
# fmt: on
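# [editor's note] Self-contained sketch of the fused-QKV split performed
# above, with hypothetical shapes (the fused matrix stacks query, key and
# value row-wise):
#
#     import torch
#     dim = 4
#     fused_w = torch.randn(3 * dim, dim)
#     q_w, k_w, v_w = fused_w[:dim], fused_w[dim : 2 * dim], fused_w[-dim:]
#     assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)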
def read_in_decoder_q_k_v ( state_dict, config ):
# fmt: off
hidden_size = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[: hidden_size]
state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
state_dict[F'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
in_proj_bias = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[: hidden_size, :]
state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[: hidden_size]
state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size :, :]
state_dict[F'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img ( ):
url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
im = Image.open(requests.get(url, stream=True ).raw )
return im
@torch.no_grad()
def convert_maskformer_checkpoint ( model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub = False ):
config = get_maskformer_config(model_name )
# load original state_dict
with open(checkpoint_path, '''rb''' ) as f:
data = pickle.load(f )
state_dict = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict, src, dest )
read_in_swin_q_k_v(state_dict, config.backbone_config )
read_in_decoder_q_k_v(state_dict, config )
# update to torch tensors
for key, value in state_dict.items():
state_dict[key] = torch.from_numpy(value )
# load 🤗 model
model = MaskFormerForInstanceSegmentation(config )
model.eval()
for name, param in model.named_parameters():
print(name, param.shape )
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
image = prepare_img()
if "vistas" in model_name:
ignore_index = 65
elif "cityscapes" in model_name:
ignore_index = 65_535
else:
ignore_index = 255
reduce_labels = True if '''ade''' in model_name else False
image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels )
inputs = image_processor(image, return_tensors='''pt''' )
outputs = model(**inputs )
print('''Logits:''', outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
expected_logits = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help='Name of the MaskFormer model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 703 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''maskformer'''
UpperCAmelCase__ = {'''hidden_size''': '''mask_feature_size'''}
UpperCAmelCase__ = ['''resnet''', '''swin''']
UpperCAmelCase__ = ['''detr''']
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE_ , decoder_config=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 628 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
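# [editor's note] Hedged usage sketch for the unwrapping helper above (known
# as ``extract_model_from_parallel`` in accelerate): a DataParallel wrapper
# should peel back to the inner module. Illustrative only:
#
#     import torch
#     inner = torch.nn.Linear(2, 2)
#     wrapped = torch.nn.DataParallel(inner)
#     # the helper above would return ``inner`` for ``wrapped``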
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
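# [editor's note] Hedged usage sketch for the environment-patching context
# manager above (accelerate's ``patch_environment``; note the cleanup loop
# removes keys by their upper-cased name):
#
#     with patch_environment(master_port="29501"):
#         ...  # the corresponding os.environ entry exists inside the block
#     # and is removed again on exit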
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 704 |
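# [editor's note] Hedged note on the port probe above: ``connect_ex`` returns
# 0 when a listener accepts the connection, so the helper reports whether the
# given port (default 29_500, the usual torch.distributed default) is already
# in use on localhost. Illustrative check:
#
#     import socket
#     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
#         srv.bind(("localhost", 0))
#         srv.listen(1)
#         busy_port = srv.getsockname()[1]
#         # the helper above would now return True for ``busy_port``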
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
SCREAMING_SNAKE_CASE_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
SCREAMING_SNAKE_CASE_ = [5, 5, 5, 5]
elif "fl4" in model_name:
SCREAMING_SNAKE_CASE_ = [4, 4, 4, 4]
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
if "lrf" in model_name:
SCREAMING_SNAKE_CASE_ = [3, 3, 3, 3]
else:
SCREAMING_SNAKE_CASE_ = [2, 2, 2, 2]
if "tiny" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "small" in model_name:
SCREAMING_SNAKE_CASE_ = 96
elif "base" in model_name:
SCREAMING_SNAKE_CASE_ = 128
elif "large" in model_name:
SCREAMING_SNAKE_CASE_ = 192
elif "xlarge" in model_name:
SCREAMING_SNAKE_CASE_ = 256
elif "huge" in model_name:
SCREAMING_SNAKE_CASE_ = 352
# set label information
SCREAMING_SNAKE_CASE_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
SCREAMING_SNAKE_CASE_ = '''imagenet-22k-id2label.json'''
else:
SCREAMING_SNAKE_CASE_ = '''imagenet-1k-id2label.json'''
idalabel = json.load(open(hf_hub_download(__a, __a, repo_type='''dataset''' ), '''r''' ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ = FocalNetConfig(
embed_dim=__a, depths=__a, focal_levels=__a, focal_windows=__a, use_conv_embed=__a, idalabel=__a, labelaid=__a, use_post_layernorm=__a, use_layerscale=__a, )
return config
def _lowerCamelCase ( __a ):
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE_ = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''encoder.layers''', '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''downsample.proj''', '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.f''', '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.h''', '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.proj''', '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE_ = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE_ = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head''', '''classifier''' )
else:
SCREAMING_SNAKE_CASE_ = '''focalnet.''' + name
return name
def _lowerCamelCase ( __a, __a, __a=False ):
# fmt: off
SCREAMING_SNAKE_CASE_ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
SCREAMING_SNAKE_CASE_ = model_name_to_url[model_name]
print('''Checkpoint URL: ''', __a )
SCREAMING_SNAKE_CASE_ = torch.hub.load_state_dict_from_url(__a, map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE_ = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = get_focalnet_config(__a )
SCREAMING_SNAKE_CASE_ = FocalNetForImageClassification(__a )
model.eval()
# load state dict
model.load_state_dict(__a )
# verify conversion
SCREAMING_SNAKE_CASE_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE_ = BitImageProcessor(
do_resize=__a, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=__a, crop_size=224, do_normalize=__a, image_mean=__a, image_std=__a, )
SCREAMING_SNAKE_CASE_ = Image.open(requests.get(__a, stream=__a ).raw )
SCREAMING_SNAKE_CASE_ = processor(images=__a, return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
SCREAMING_SNAKE_CASE_ = image_transforms(__a ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values, __a, atol=1E-4 )
SCREAMING_SNAKE_CASE_ = model(**__a )
predicted_class_idx = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''', model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''', outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...' )
model.push_to_hub(F'{model_name}' )
processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 705 |
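# [editor's note] The module above wires up the transformers lazy-import
# pattern: names live in ``_import_structure`` and submodules are only
# imported on first attribute access. A rough, hedged sketch of the
# mechanism (not the actual _LazyModule code):
#
#     import importlib
#
#     class _LazySketch:
#         def __init__(self, package, import_structure):
#             self._package = package
#             self._map = {name: mod for mod, names in import_structure.items() for name in names}
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._map[attr], self._package)
#             return getattr(module, attr)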
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''glpn'''
def __init__(self , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[32, 64, 1_60, 2_56] , SCREAMING_SNAKE_CASE_=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE_=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE_=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1e-6 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=-1 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = patch_sizes
SCREAMING_SNAKE_CASE_ = strides
SCREAMING_SNAKE_CASE_ = mlp_ratios
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = drop_path_rate
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = decoder_hidden_size
SCREAMING_SNAKE_CASE_ = max_depth
SCREAMING_SNAKE_CASE_ = head_in_index | 628 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( __a, __a, __a, ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 706 |
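# [editor's note] The helper above is the semiconductor mass-action law,
# n * p = n_i**2, rearranged for whichever quantity is passed as zero.
# Hedged worked examples with made-up numbers:
#
#     electron_conc=25, hole_conc=100, intrinsic_conc=0
#     -> intrinsic_conc = sqrt(25 * 100) = 50.0
#     electron_conc=0, hole_conc=48, intrinsic_conc=12
#     -> electron_conc = 12**2 / 48 = 3.0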
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , device , seed=0 ):
"""simple docstring"""
image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
device )
# create hint
hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('''mps''' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
device = '''cpu'''
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 | 0 |
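# [editor's note] Hedged note on determinism in the slow test above: passing
# a freshly seeded ``torch.Generator`` to each pipeline call is what makes
# the run reproducible against the reference image. Minimal illustration:
#
#     import torch
#     g1 = torch.Generator().manual_seed(0)
#     g2 = torch.Generator().manual_seed(0)
#     assert torch.equal(torch.rand(2, generator=g1), torch.rand(2, generator=g2))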
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = LayoutLMTokenizer
UpperCAmelCase__ = LayoutLMTokenizerFast
UpperCAmelCase__ = True
UpperCAmelCase__ = True
def _lowercase (self ):
"""simple docstring"""
super().setUp()
vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
input_text = '''UNwant\u00E9d,running'''
output_text = '''unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def _lowercase (self ):
"""simple docstring"""
pass | 707 |
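# [editor's note] The expected pieces above (['un', '##want', '##ed', ...])
# come from greedy longest-match-first WordPiece. A hedged, self-contained
# sketch of that lookup for a single lower-cased word (illustration only,
# not the transformers tokenizer):
def _sketch_wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]
        start = end
    return pieces

# e.g. _sketch_wordpiece("unwanted", {"un", "##want", "##ed"})
# -> ["un", "##want", "##ed"]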
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 | 0 |
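# [editor's note] Hedged sketch of the projection-head pattern the model
# above implements: take either the final hidden state or the penultimate
# one (after a LayerNorm), then map hidden_size -> project_dim with a
# Linear layer. Hypothetical standalone module:
#
#     import torch.nn as nn
#     class ProjectionSketch(nn.Module):
#         def __init__(self, hidden_size, project_dim):
#             super().__init__()
#             self.norm = nn.LayerNorm(hidden_size)
#             self.proj = nn.Linear(hidden_size, project_dim)
#         def forward(self, hidden):
#             return self.proj(self.norm(hidden))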
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCAmelCase__ = datasets.logging.get_logger(__name__)
lowerCAmelCase__ = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
lowerCAmelCase__ = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
lowerCAmelCase__ = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def _lowerCamelCase ( __a, __a, __a=False, __a=False, __a=True, __a=False, __a="dummy_doc" ):
SCREAMING_SNAKE_CASE_ = {doc: key_lines}
SCREAMING_SNAKE_CASE_ = {doc: sys_lines}
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = reader.get_doc_mentions(__a, key_doc_lines[doc], __a )
key_singletons_num += singletons_num
if NP_only or min_span:
SCREAMING_SNAKE_CASE_ = reader.set_annotated_parse_trees(__a, key_doc_lines[doc], __a, __a )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = reader.get_doc_mentions(__a, sys_doc_lines[doc], __a )
sys_singletons_num += singletons_num
if NP_only or min_span:
SCREAMING_SNAKE_CASE_ = reader.set_annotated_parse_trees(__a, key_doc_lines[doc], __a, __a )
if remove_nested:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = reader.remove_nested_coref_mentions(__a, __a )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = reader.remove_nested_coref_mentions(__a, __a )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
SCREAMING_SNAKE_CASE_ = reader.get_mention_assignments(__a, __a )
SCREAMING_SNAKE_CASE_ = reader.get_mention_assignments(__a, __a )
SCREAMING_SNAKE_CASE_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
'''Number of resulting singleton clusters in the key '''
F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
'''files, respectively''' )
return doc_coref_infos
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a, __a ):
SCREAMING_SNAKE_CASE_ = get_coref_infos(__a, __a, __a, __a, __a, __a )
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
for name, metric in metrics:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = evaluator.evaluate_documents(__a, __a, beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} )
logger.info(
name.ljust(10 ), F'Recall: {recall * 100:.2f}', F' Precision: {precision * 100:.2f}', F' F1: {fa * 100:.2f}', )
if conll_subparts_num == 3:
SCREAMING_SNAKE_CASE_ = (conll / 3) * 100
logger.info(F'CoNLL score: {conll:.2f}' )
output_scores.update({'''conll_score''': conll} )
return output_scores
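# Worked example of the CoNLL average computed above (illustrative numbers,
# not from the original file): with MUC F1 = 0.70, B-cubed F1 = 0.65 and
# CEAFe F1 = 0.60, the three "conll" subparts sum to 1.95, so
# conll_score = (1.95 / 3) * 100 = 65.0.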
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
SCREAMING_SNAKE_CASE_ = line.split()[5]
                if parse_col != "-":
SCREAMING_SNAKE_CASE_ = True
break
else:
break
return has_gold_parse
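# Illustration (derived from the usage example in the metric description
# string above): for the line
# 'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -'
# line.split()[5] is '(TOP(S(VP*', which differs from '-', so the document is
# treated as carrying gold parse annotation and the helper returns True.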
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
def _lowercase (self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
SCREAMING_SNAKE_CASE_ = util.check_gold_parse_annotation(SCREAMING_SNAKE_CASE_ )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
SCREAMING_SNAKE_CASE_ = evaluate(
key_lines=SCREAMING_SNAKE_CASE_ , sys_lines=SCREAMING_SNAKE_CASE_ , metrics=SCREAMING_SNAKE_CASE_ , NP_only=SCREAMING_SNAKE_CASE_ , remove_nested=SCREAMING_SNAKE_CASE_ , keep_singletons=SCREAMING_SNAKE_CASE_ , min_span=SCREAMING_SNAKE_CASE_ , )
        return score
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase ( __a, __a=0.9_9_9, __a="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ = []
for i in range(__a ):
SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ), __a ) )
return torch.tensor(__a, dtype=torch.floataa )
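# Worked sketch (not part of the original file; the helper above matches
# diffusers' `betas_for_alpha_bar`): with the default cosine transform each
# beta_i equals 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), capped at
# max_beta, so e.g. T = 4 yields a length-4 float tensor of increasing
# betas no larger than 0.999.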
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
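    # Note (added for clarity; in the upstream scheduler this property is
    # `init_noise_sigma`): it gives the scale applied to the initial Gaussian
    # latents before denoising starts.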
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
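    # Note (added for clarity): the division above implements the
    # c_in = 1 / sqrt(sigma^2 + 1) input preconditioning used by
    # k-diffusion/EDM-style samplers, keeping the model input roughly
    # unit-variance at every noise level.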
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
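    # Note (added for clarity): this maps a continuous sigma back to a
    # fractional timestep by locating its two nearest neighbours in
    # log-sigma space and linearly interpolating between their indices.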
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
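    # Note (added for clarity): the ramp realises the noise schedule of
    # Karras et al. (2022):
    #   sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho
    # with rho = 7.0, which spaces the sigmas more densely near sigma_min.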
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
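    # Note (added for clarity): this is the variance-exploding forward process
    # x_t = x_0 + sigma_t * eps, matching the sigma convention used by the
    # sampler above.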
def __len__(self ):
"""simple docstring"""
        return self.config.num_train_timesteps
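# --- Usage sketch (illustrative; not part of the original file) ---
# Assuming the class above corresponds to diffusers' HeunDiscreteScheduler and
# `unet` is a trained noise-prediction model (both names are placeholders):
#
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=25, device="cuda")
#   sample = torch.randn(1, 4, 64, 64, device="cuda") * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
#
# Every sigma except the endpoints appears twice in `timesteps`, so the loop
# evaluates the model twice per step: once for the Euler predictor and once
# for the Heun corrector averaged inside `step`.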
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = WavaVecaPhonemeCTCTokenizer
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=20 , SCREAMING_SNAKE_CASE_=5 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )]
SCREAMING_SNAKE_CASE_ = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) )
if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length:
SCREAMING_SNAKE_CASE_ = toks[:max_length]
if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0:
while len(SCREAMING_SNAKE_CASE_ ) < min_length:
SCREAMING_SNAKE_CASE_ = toks + toks
# toks_str = [t[1] for t in toks]
SCREAMING_SNAKE_CASE_ = [t[0] for t in toks]
# Ensure consistency
SCREAMING_SNAKE_CASE_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
if " " not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1:
SCREAMING_SNAKE_CASE_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )
)
if with_prefix_space:
SCREAMING_SNAKE_CASE_ = ''' ''' + output_txt
SCREAMING_SNAKE_CASE_ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
return output_txt, output_ids
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
SCREAMING_SNAKE_CASE_ = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
SCREAMING_SNAKE_CASE_ = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa
SCREAMING_SNAKE_CASE_ = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3)
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
SCREAMING_SNAKE_CASE_ = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
SCREAMING_SNAKE_CASE_ = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
SCREAMING_SNAKE_CASE_ = tokenizer.decode(sample_ids[0] )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
SCREAMING_SNAKE_CASE_ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
SCREAMING_SNAKE_CASE_ = '''Hello how Are you'''
SCREAMING_SNAKE_CASE_ = '''hello how are you'''
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
SCREAMING_SNAKE_CASE_ = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
SCREAMING_SNAKE_CASE_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def _lowercase (SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [d[key] for d in offsets]
return retrieved_list
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
SCREAMING_SNAKE_CASE_ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
SCREAMING_SNAKE_CASE_ = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) )
# transform list to ModelOutput
SCREAMING_SNAKE_CASE_ = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
[recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
SCREAMING_SNAKE_CASE_ = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
SCREAMING_SNAKE_CASE_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
SCREAMING_SNAKE_CASE_ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
SCREAMING_SNAKE_CASE_ = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
SCREAMING_SNAKE_CASE_ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
SCREAMING_SNAKE_CASE_ = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.vocab_size
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
SCREAMING_SNAKE_CASE_ = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
                self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
            len(SCREAMING_SNAKE_CASE_ ) , 6 )  # check whether the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ )  # check for doc token related keys in dictionary.
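# Output sketch (my addition, restating the assertions above): calling the retriever with
# question ids and question hidden states returns a dict holding context_input_ids,
# context_attention_mask, retrieved_doc_embeds and doc_ids; once a ctx-encoder tokenizer
# is set, tokenized_doc_ids and tokenized_doc_attention_mask are added as well.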
"""simple docstring"""
import os
def solution():
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
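# Worked example (my addition, from the Project Euler 22 statement): COLIN has
# alphabetical value 3 + 15 + 12 + 9 + 14 = 53, and at position 938 in the sorted
# list it contributes 938 * 53 = 49714 to the total.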
if __name__ == "__main__":
    print(solution())
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update a single element in log(N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the result of applying `fn` over the range [i, j] in log(N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first traversal of the tree nodes."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
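    # Extra usage sketch (my addition): any associative two-argument merge works,
    # e.g. a running product over the original collection.
    product_tree = SegmentTree([2, 1, 5, 3, 4], lambda a, b: a * b)
    print(product_tree.query_range(0, 4))  # 2 * 1 * 5 * 3 * 4 = 120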
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse leftover "--key value" pairs into a kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
    main()
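# Illustration (my addition, with made-up flag values): parse_unknown_args maps
# leftover CLI pairs to kwargs, e.g.
# parse_unknown_args(["--name", "squad", "--all_configs", "true"])
# -> {"name": "squad", "all_configs": "true"}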
"""simple docstring"""
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
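# Sanity sketch (my addition): the loop implements the Catalan recurrence
# C(i) = C(i - 1) * (4 * i - 2) // (i + 1), so
# [catalan(n) for n in range(1, 7)] -> [1, 1, 2, 5, 14, 42]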
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import math
def sieve(n):
    """Segmented sieve: return all primes <= n, one sqrt(n)-sized segment at a time."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
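# Cross-check sketch (my addition): for a small bound the segmented sieve matches
# the usual list of primes.
# sieve(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]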
print(sieve(10**6))
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Given a sorted list, return indices of the two numbers that add up to target."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
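# Note (my addition): correctness relies on `nums` being sorted ascending; the two
# pointers then sweep inward in O(n) time and O(1) space, e.g.
# two_pointer([2, 7, 11, 15], 9) -> [0, 1]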
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
"""simple docstring"""
from random import randint, random
def construct_highway(
    number_of_cells,
    frequency,
    initial_speed,
    random_frequency=False,
    random_speed=False,
    max_speed=5,
):
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now, car_index):
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now, probability, max_speed):
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            distance = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], distance)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway, number_of_update, probability, max_speed):
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
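# Usage sketch (my addition): a 25-cell circular road with a car every 4 cells at
# initial speed 2, evolved for a few steps with 10% random braking.
# simulate(construct_highway(25, 4, 2), number_of_update=5, probability=0.1, max_speed=5)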
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs`, returning a list of random draws used to compare runs."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
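# Minimal checkpointing sketch (my addition, mirroring the tests above; paths and names are mine):
# project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# accelerator = Accelerator(project_dir="runs", project_config=project_config)
# model, optimizer = accelerator.prepare(model, optimizer)
# accelerator.save_state()                                 # -> runs/checkpoints/checkpoint_0
# accelerator.load_state(os.path.join("runs", "checkpoints", "checkpoint_0"))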
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
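# Effect sketch (my addition): replacing sys.modules[__name__] with the _LazyModule
# above defers the heavy torch/tokenizers imports until an attribute such as
# ReformerModel is first accessed.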
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
        assert abs(result_mean.item() - 0.19_41 ) < 1e-3
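# Denoising-loop sketch (my addition, mirroring full_loop above; `model` and `sample` are mine):
# scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
# scheduler.set_timesteps(10)
# for t in scheduler.timesteps:
#     residual = model(sample, t)
#     sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample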
"""simple docstring"""
import sys
from pathlib import Path
lowerCAmelCase__ = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowerCAmelCase__ = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}
lowerCAmelCase__ = 'zero2'
lowerCAmelCase__ = 'zero3'
lowerCAmelCase__ = [ZEROa, ZEROa]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
lowerCAmelCase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
pass
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = models[model]
SCREAMING_SNAKE_CASE_ = self.run_trainer(
stage=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , eval_steps=SCREAMING_SNAKE_CASE_ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
self.do_checks(SCREAMING_SNAKE_CASE_ )
return output_dir
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_auto_remove_tmp_dir('''./xxx''' , after=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = f'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(SCREAMING_SNAKE_CASE_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
SCREAMING_SNAKE_CASE_ = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
SCREAMING_SNAKE_CASE_ = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
SCREAMING_SNAKE_CASE_ = self.get_launcher(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=self.get_env() )
return output_dir
def _lowercase (self , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = min(2 , get_gpu_count() ) if distributed else 1
return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
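# e.g. (my addition): with two visible GPUs, get_launcher(distributed=True) yields
# ["deepspeed", "--num_nodes", "1", "--num_gpus", "2"], which is prepended to the
# training script and its arguments above.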
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
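# Behaviour sketch (my addition, restating the test above): move_added_token("new1", 1)
# reindexes a freshly added token to id 1 without duplicating it, so afterwards
# tokenizer.encode("new1") == [1] and tokenizer.decode([1]) == "new1".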
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''num_encoder_blocks''' ) )
class SegformerModelTester:
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[16, 32, 64, 1_28] , SCREAMING_SNAKE_CASE_=[1, 4, 8, 16] , SCREAMING_SNAKE_CASE_=[1, 2, 4, 8] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_encoder_blocks
SCREAMING_SNAKE_CASE_ = sr_ratios
SCREAMING_SNAKE_CASE_ = depths
SCREAMING_SNAKE_CASE_ = hidden_sizes
SCREAMING_SNAKE_CASE_ = downsampling_rates
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = scope
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values, labels
def _lowercase (self ):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SegformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
SCREAMING_SNAKE_CASE_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertGreater(result.loss , 0.0 )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SegformerModelTester(self )
SCREAMING_SNAKE_CASE_ = SegformerConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def _lowercase (self ):
"""simple docstring"""
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
SCREAMING_SNAKE_CASE_ = sum(self.model_tester.depths )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // 32) ** 2
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // 4) ** 2
SCREAMING_SNAKE_CASE_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _lowercase (self ):
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = outputs.hidden_states
SCREAMING_SNAKE_CASE_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _lowercase (self ):
"""simple docstring"""
pass
@slow
def _lowercase (self ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ = SegformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _lowerCamelCase ( ):
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-1 ) )
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = prepare_img()
SCREAMING_SNAKE_CASE_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(5_00, 3_00)] )
SCREAMING_SNAKE_CASE_ = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
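# A quick numeric sketch of the shape arithmetic checked above (the concrete
# values here are illustrative, not taken from the test config): the first
# encoder block downsamples the input by 4, and spatial-reduction attention
# divides the key/value sequence length by sr_ratio**2.
image_size, sr_ratio = 64, 8
expected_seq_len = (image_size // 4) ** 2  # 16 * 16 = 256 queries
expected_reduced_seq_len = (image_size // (4 * sr_ratio)) ** 2  # 2 * 2 = 4 keys/values
assert (expected_seq_len, expected_reduced_seq_len) == (256, 4)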
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__a, max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
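# A readable reference sketch of the same brute-force search (assuming the
# placeholder assignments above bind the names used later in each function):
# count integer right triangles per perimeter and return the perimeter with
# the most solutions, i.e. Project Euler problem 39.
from collections import Counter
def most_common_perimeter(max_perimeter: int = 1_000) -> int:
    counts: Counter = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter <= max_perimeter:
                    counts[perimeter] += 1
    return counts.most_common(1)[0][0]
# most_common_perimeter(1_000) evaluates to 840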
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''deformable_detr'''
UpperCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__(self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3_00 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=2_56 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="sine" , SCREAMING_SNAKE_CASE_="resnet50" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=3_00 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.25 , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_timm_backbone
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = num_queries
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = auxiliary_loss
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = backbone
SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ = dilation
# deformable attributes
SCREAMING_SNAKE_CASE_ = num_feature_levels
SCREAMING_SNAKE_CASE_ = encoder_n_points
SCREAMING_SNAKE_CASE_ = decoder_n_points
SCREAMING_SNAKE_CASE_ = two_stage
SCREAMING_SNAKE_CASE_ = two_stage_num_proposals
SCREAMING_SNAKE_CASE_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE_ = class_cost
SCREAMING_SNAKE_CASE_ = bbox_cost
SCREAMING_SNAKE_CASE_ = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE_ = mask_loss_coefficient
SCREAMING_SNAKE_CASE_ = dice_loss_coefficient
SCREAMING_SNAKE_CASE_ = bbox_loss_coefficient
SCREAMING_SNAKE_CASE_ = giou_loss_coefficient
SCREAMING_SNAKE_CASE_ = eos_coefficient
SCREAMING_SNAKE_CASE_ = focal_alpha
SCREAMING_SNAKE_CASE_ = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def _lowercase (self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _lowercase (self ):
"""simple docstring"""
return self.d_model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output
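# A minimal runnable sketch of the attribute_map indirection used above
# (the class and field names here are illustrative): reads of a mapped
# attribute such as hidden_size are redirected to the real field, d_model.
class AttributeMapSketch:
    attribute_map = {"hidden_size": "d_model"}
    def __init__(self, d_model: int = 256):
        self.d_model = d_model
    def __getattr__(self, name):
        mapped = self.__class__.attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)  # delegate to the real attribute
        raise AttributeError(name)
assert AttributeMapSketch(d_model=256).hidden_size == 256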
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
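# Readable sketches of two helpers defined above (names are illustrative):
# the recursive dict merge and the temporary environment-variable patcher.
# Note the sketch adds a try/finally around the yield, which the original
# omits.
import os
from contextlib import contextmanager
def merge_dicts_sketch(source: dict, destination: dict) -> dict:
    for key, value in source.items():
        if isinstance(value, dict):
            merge_dicts_sketch(value, destination.setdefault(key, {}))  # recurse into nested dicts
        else:
            destination[key] = value
    return destination
@contextmanager
def patch_environment_sketch(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            os.environ.pop(key.upper(), None)
assert merge_dicts_sketch({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"b": 1, "c": 2}}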
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a=2, __a=3, __a=16, __a = 10, __a = 2 ):
def get_dataset(__a ):
SCREAMING_SNAKE_CASE_ = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__a, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=None ):
SCREAMING_SNAKE_CASE_ = []
for epoch in range(__a ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = batch
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(__a, __a )
accelerator.backward(__a )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class snake_case ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 11 states; with total_limit=2 only the two most recent checkpoints should remain:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
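# A minimal save/load round-trip sketch of the pattern exercised throughout
# this test file (assumes the public accelerate API; runs on CPU):
import tempfile
import torch
from accelerate import Accelerator
def checkpoint_round_trip_sketch():
    accelerator = Accelerator()
    model = torch.nn.Linear(1, 1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    with tempfile.TemporaryDirectory() as tmpdir:
        accelerator.save_state(tmpdir)  # writes model/optimizer/RNG states
        accelerator.load_state(tmpdir)  # restores them in place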
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
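# A readable sketch of exactly what the test above asserts: BPE splits a
# word into known subword pieces ("@@" marks a non-final piece) and the
# vocabulary maps each piece to an id, with unknown tokens mapped to <unk>.
vocab = {"adapt": 0, "re@@": 1, "a@@": 2, "apt": 3, "c@@": 4, "t": 5, "<unk>": 6}
tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split() + ["<unk>"]
assert [vocab.get(tok, vocab["<unk>"]) for tok in tokens] == [0, 1, 2, 4, 5, 1, 0, 3, 6]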
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowerCamelCase ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__a ):
requests.request('''GET''', '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''', '''https://huggingface.co''', timeout=1.0 )
@pytest.mark.integration
def _lowerCamelCase ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''', '''https://huggingface.co''' )
def _lowerCamelCase ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__a ):
http_head('''https://huggingface.co''' )
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCamelCase ( __a, __a, __a, __a ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = BigBirdConfig.from_json_file(__a )
print(F'Building PyTorch model from configuration: {config}' )
if is_trivia_qa:
SCREAMING_SNAKE_CASE_ = BigBirdForQuestionAnswering(__a )
else:
SCREAMING_SNAKE_CASE_ = BigBirdForPreTraining(__a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(__a, __a, is_trivia_qa=__a )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(__a )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
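# Example invocation of the script above (the file name and paths are
# hypothetical placeholders, not taken from this file):
# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#     --big_bird_config_file /path/to/bigbird/config.json \
#     --pytorch_dump_path /path/to/output \
#     --is_trivia_qa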
"""simple docstring"""
def _lowerCamelCase ( __a, __a ):
return [sentence[i : i + ngram_size] for i in range(len(__a ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
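# Usage sketch with a readable name for the helper above: all character
# n-grams of a fixed size, taken left to right.
def char_ngrams(sentence: str, ngram_size: int) -> list:
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
assert char_ngrams("abcde", 3) == ["abc", "bcd", "cde"]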
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCamelCase ( __a ):
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''', __a, )
if isinstance(__a, torch.Tensor ):
return image
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [image]
if isinstance(image[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = image[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE_ = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = np.array(__a ).astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = image.transpose(0, 3, 1, 2 )
SCREAMING_SNAKE_CASE_ = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(image[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return image
def _lowerCamelCase ( __a ):
if isinstance(__a, torch.Tensor ):
return mask
elif isinstance(__a, PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = [mask]
if isinstance(mask[0], PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = mask[0].size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE_ = [np.array(m.convert('''L''' ).resize((w, h), resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE_ = np.concatenate(__a, axis=0 )
SCREAMING_SNAKE_CASE_ = mask.astype(np.floataa ) / 2_5_5.0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a )
elif isinstance(mask[0], torch.Tensor ):
SCREAMING_SNAKE_CASE_ = torch.cat(__a, dim=0 )
return mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2_50 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = 10 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = image
SCREAMING_SNAKE_CASE_ = _preprocess_image(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = original_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = _preprocess_mask(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = mask_image.to(device=self.device , dtype=self.unet.dtype )
SCREAMING_SNAKE_CASE_ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
SCREAMING_SNAKE_CASE_ = original_image.shape
SCREAMING_SNAKE_CASE_ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
SCREAMING_SNAKE_CASE_ = eta
SCREAMING_SNAKE_CASE_ = self.scheduler.timesteps[0] + 1
SCREAMING_SNAKE_CASE_ = generator[0] if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
SCREAMING_SNAKE_CASE_ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
SCREAMING_SNAKE_CASE_ = self.scheduler.undo_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = t
SCREAMING_SNAKE_CASE_ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
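# A hedged end-to-end sketch of the inpainting loop above; the checkpoint
# name and file paths are assumptions, not taken from this file, and the
# snippet is left commented out because it downloads model weights:
# import PIL.Image
# from diffusers import RePaintPipeline, RePaintScheduler
# scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
# pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
# original = PIL.Image.open("original.png")
# mask = PIL.Image.open("mask.png")  # thresholded to {0, 1} by _preprocess_mask
# result = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]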
lowerCAmelCase__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowerCAmelCase__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowerCAmelCase__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowerCAmelCase__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowerCAmelCase__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowerCAmelCase__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowerCAmelCase__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
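# These constants look like precomputed, strictly decreasing diffusion
# timestep schedules that end at 0 (an assumption based on their shape); a
# quick sanity check one can run over any of them:
def is_descending_schedule(schedule: list) -> bool:
    return all(a > b for a, b in zip(schedule, schedule[1:])) and schedule[-1] == 0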
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
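# The metric at the top of this script reduces to mean exact match over the
# argmax predictions; a minimal numpy sketch:
import numpy as np
preds = np.array([1, 0, 2, 2])
labels = np.array([1, 0, 1, 2])
assert (preds == labels).mean() == 0.75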
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
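# A simplified, runnable sketch of the lazy-import pattern wired up above:
# attribute access triggers the submodule import on first use (the real
# _LazyModule also handles __dir__, pickling, and module specs).
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # reverse map: attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(f"{self.__name__}.{module_name}"), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value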
"""simple docstring"""
import cva
import numpy as np
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
"""simple docstring"""
return str(self.k )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(SCREAMING_SNAKE_CASE_ , 0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(SCREAMING_SNAKE_CASE_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = np.gradient(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
SCREAMING_SNAKE_CASE_ = self.k # use the k configured in the constructor instead of a hardcoded 0.04
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE_ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
# Corner response threshold; tune this value per image
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
if __name__ == "__main__":
lowerCAmelCase__ = HarrisCorner(0.04, 3)
lowerCAmelCase__, lowerCAmelCase__ = edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
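# The per-pixel response above is the classic Harris measure
# R = det(M) - k * trace(M)**2 for the 2x2 structure tensor
# M = [[Wxx, Wxy], [Wxy, Wyy]]; a tiny sketch with made-up window sums:
wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
det = wxx * wyy - wxy ** 2  # 11.0
trace = wxx + wyy  # 7.0
r = det - k * trace ** 2  # 11.0 - 0.04 * 49.0 = 9.04
assert abs(r - 9.04) < 1e-9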
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class snake_case ( __lowercase ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
raise TypeError('''The parameter s type must be str.''' )
return [s[i:] + s[:i] for i in range(len(__a ) )]
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
raise TypeError('''The parameter s type must be str.''' )
if not s:
raise ValueError('''The parameter s must not be empty.''' )
SCREAMING_SNAKE_CASE_ = all_rotations(__a )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
SCREAMING_SNAKE_CASE_ = {
'''bwt_string''': ''''''.join([word[-1] for word in rotations] ),
'''idx_original_string''': rotations.index(__a ),
}
return response
def _lowerCamelCase ( __a, __a ):
if not isinstance(__a, __a ):
raise TypeError('''The parameter bwt_string type must be str.''' )
if not bwt_string:
raise ValueError('''The parameter bwt_string must not be empty.''' )
try:
SCREAMING_SNAKE_CASE_ = int(__a )
except ValueError:
raise TypeError(
'''The parameter idx_original_string type must be int or passive'''
''' of cast to int.''' )
if idx_original_string < 0:
raise ValueError('''The parameter idx_original_string must not be lower than 0.''' )
if idx_original_string >= len(__a ):
raise ValueError(
'''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' )
SCREAMING_SNAKE_CASE_ = [''''''] * len(__a )
for _ in range(len(__a ) ):
for i in range(len(__a ) ):
SCREAMING_SNAKE_CASE_ = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCAmelCase__ = 'Provide a string that I will generate its BWT transform: '
lowerCAmelCase__ = input(entry_msg).strip()
lowerCAmelCase__ = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result['bwt_string']}\''''
)
lowerCAmelCase__ = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
f'''we get original string \'{original_string}\''''
)
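# A compact readable round-trip sketch of the transform implemented above:
def bwt_sketch(s: str) -> tuple:
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(row[-1] for row in rotations), rotations.index(s)
def inverse_bwt_sketch(bwt: str, idx: int) -> str:
    table = [""] * len(bwt)
    for _ in range(len(bwt)):
        table = sorted(bwt[i] + table[i] for i in range(len(bwt)))
    return table[idx]
encoded, idx = bwt_sketch("banana")
assert (encoded, idx) == ("nnbaaa", 3)
assert inverse_bwt_sketch(encoded, idx) == "banana"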
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
lowerCAmelCase__ = Path(__file__).parent / 'model_card_template.md'
lowerCAmelCase__ = uuida().hex
lowerCAmelCase__ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def _lowerCamelCase ( __a = None ):
SCREAMING_SNAKE_CASE_ = F'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F'; torch/{_torch_version}'
if is_flax_available():
ua += F'; jax/{_jax_version}'
ua += F'; flax/{_flax_version}'
if is_onnx_available():
ua += F'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get('''DIFFUSERS_IS_CI''', '''''' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__a, __a ):
ua += "; " + "; ".join(F'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(__a, __a ):
ua += "; " + user_agent
return ua
def get_full_repo_name(model_id, organization = None, token = None ):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['''name''']
        return F'{username}/{model_id}'
    else:
        return F'{organization}/{model_id}'
def create_model_card(args, model_name ):
    if not is_jinja_available():
        raise ValueError(
            '''Modelcard rendering is based on Jinja templates.'''
            ''' Please make sure to have `jinja` installed before using `create_model_card`.'''
            ''' To install it, please run `pip install Jinja2`.''' )
    if hasattr(args, '''local_rank''' ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, '''hub_token''' ) else None
    repo_name = get_full_repo_name(model_name, token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language='''en''', license='''apache-2.0''', library_name='''diffusers''', tags=[], datasets=args.dataset_name, metrics=[], ), template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name, dataset_name=args.dataset_name if hasattr(args, '''dataset_name''' ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, '''gradient_accumulation_steps''' ) else None
        ), adam_beta1=args.adam_beta1 if hasattr(args, '''adam_beta1''' ) else None, adam_beta2=args.adam_beta2 if hasattr(args, '''adam_beta2''' ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(args, '''adam_weight_decay''' ) else None, adam_epsilon=args.adam_epsilon if hasattr(args, '''adam_epsilon''' ) else None, lr_scheduler=args.lr_scheduler if hasattr(args, '''lr_scheduler''' ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(args, '''lr_warmup_steps''' ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(args, '''ema_inv_gamma''' ) else None, ema_power=args.ema_power if hasattr(args, '''ema_power''' ) else None, ema_max_decay=args.ema_max_decay if hasattr(args, '''ema_max_decay''' ) else None, mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, '''README.md''' )
    model_card.save(card_path )
def extract_commit_hash(resolved_file, commit_hash = None ):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r'''snapshots/([^/]+)/''', resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
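# Hedged example (added; made-up path): a cached file under
# ".../models--org--repo/snapshots/0123456789abcdef0123456789abcdef01234567/config.json"
# returns the 40-hex snapshot segment as the commit hash; anything else gives None.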
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir = None, new_cache_dir = None ):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True, exist_ok=True )
            os.replace(old_blob_path, new_blob_path )
            try:
                os.symlink(new_blob_path, old_blob_path )
except OSError:
logger.warning(
'''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
'the directory exists and can be written to.'
)
def _add_variant(weights_name, variant = None ):
    if variant is not None:
        splits = weights_name.split('''.''' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '''.'''.join(splits )
    return weights_name
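# Example derivable from the helper above: _add_variant('''diffusion_pytorch_model.bin''', '''fp16''')
# returns '''diffusion_pytorch_model.fp16.bin'''; with variant=None the name is unchanged.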
def _get_model_file( __a = None is removed; keyword-only arguments restored below
    pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name )
            return model_file
        else:
            raise EnvironmentError(
                F'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse('''0.20.0''' )
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision ), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    F'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.', FutureWarning, )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    F'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision )}\' so that the correct variant file can be added.', FutureWarning, )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
F'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
'''this model name. Check the model page at '''
F'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
F'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
F'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
F'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
F' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
F' directory containing a file named {weights_name} or'
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
F'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
F'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
F'containing a file named {weights_name}' ) | 702 |
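# Note (added): resolution order in _get_model_file is local file -> local directory
# (optionally under `subfolder`) -> Hub download, with a deprecation shim that accepts
# a variant passed through `revision`; Hub failures are re-raised as EnvironmentError.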
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x ): # picklable for multiprocessing
    return x.sum()
def add_one(i ): # picklable for multiprocessing
    return i + 1
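def _demo_map_nested():
    # Hedged sketch (added, not part of the original tests): map_nested applies a
    # function over arbitrarily nested lists/dicts, which is what the tests below assert.
    assert map_nested(add_one, {'''a''': [1, 2], '''b''': 3} ) == {'''a''': [2, 3], '''b''': 4}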
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase ):
    def test_map_nested(self ):
"""simple docstring"""
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {'''a''': 1, '''b''': 2}
        s6 = {'''a''': [1, 2], '''b''': [3, 4]}
        s7 = {'''a''': {'''1''': 1}, '''b''': 2}
        s8 = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {'''a''': 2, '''b''': 3}
        expected_map_nested_s6 = {'''a''': [2, 3], '''b''': [4, 5]}
        expected_map_nested_s7 = {'''a''': {'''1''': 2}, '''b''': 3}
        expected_map_nested_s8 = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
        self.assertEqual(map_nested(add_one , s1 ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 ) , expected_map_nested_s8 )
        num_proc = 2
        self.assertEqual(map_nested(add_one , s1 , num_proc=num_proc ) , expected_map_nested_s1 )
        self.assertEqual(map_nested(add_one , s2 , num_proc=num_proc ) , expected_map_nested_s2 )
        self.assertEqual(map_nested(add_one , s3 , num_proc=num_proc ) , expected_map_nested_s3 )
        self.assertEqual(map_nested(add_one , s4 , num_proc=num_proc ) , expected_map_nested_s4 )
        self.assertEqual(map_nested(add_one , s5 , num_proc=num_proc ) , expected_map_nested_s5 )
        self.assertEqual(map_nested(add_one , s6 , num_proc=num_proc ) , expected_map_nested_s6 )
        self.assertEqual(map_nested(add_one , s7 , num_proc=num_proc ) , expected_map_nested_s7 )
        self.assertEqual(map_nested(add_one , s8 , num_proc=num_proc ) , expected_map_nested_s8 )
        sn = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
        expected_map_nested_sn_sum = {'''a''': 2, '''b''': 0, '''c''': 2}
        expected_map_nested_sn_int = {
            '''a''': np.eye(2 ).astype(int ),
            '''b''': np.zeros(3 ).astype(int ),
            '''c''': np.ones(2 ).astype(int ),
        }
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        self.assertEqual(map_nested(np_sum , sn , map_numpy=False , num_proc=num_proc ) , expected_map_nested_sn_sum )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int , sn , map_numpy=True , num_proc=num_proc ).items()} , {k: v.tolist() for k, v in expected_map_nested_sn_int.items()} , )
        with self.assertRaises(AttributeError ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , sn , num_proc=num_proc )
    def test_zip_dict(self ):
        """simple docstring"""
        d1 = {'''a''': 1, '''b''': 2}
        d2 = {'''a''': 3, '''b''': 4}
        d3 = {'''a''': 5, '''b''': 6}
        expected_zip_dict_result = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(d1 , d2 , d3 ) ) , expected_zip_dict_result )
    def test_temporary_assignment(self ):
        """simple docstring"""
        class Foo:
            my_attr = '''bar'''
        foo = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(foo , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''', [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc ):
    with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
        data_struct = {F'{i}': i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10, data_struct, num_proc=num_proc, parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
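# Note (added): map_nested only switches to multiprocessing when num_proc > 1 and the
# iterable reaches parallel_min_length; the (length, num_proc, expected) triples above
# encode exactly that threshold behavior.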
class TempSeedTest(TestCase ):
@require_tf
    def test_tensorflow(self ):
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            x = tf.random.uniform((1, 3) )
            return model(x ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@require_torch
    def test_torch(self ):
"""simple docstring"""
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out1 = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
    def test_numpy(self ):
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out1 = gen_random_output()
        with temp_seed(42 ):
            out2 = gen_random_output()
        out3 = gen_random_output()
        np.testing.assert_equal(out1 , out2 )
        self.assertGreater(np.abs(out1 - out3 ).sum() , 0 )
@pytest.mark.parametrize('''input_data''', [{}] )
def test_nested_data_structure_data(input_data ):
    output_data = NestedDataStructure(input_data ).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''', [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
], )
def test_flatten(data, expected_output ):
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y='''foobar''' )
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input ) == expected_output
    nested_input = {'''a''': {'''b''': A(x=10, y='''foo''' )}, '''c''': [A(x=20, y='''bar''' )]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(nested_input ) == expected_output
    with pytest.raises(TypeError ):
        asdict([1, A(x=10, y='''foo''' )] )
def _split_text(text ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
        assert out.count('''hello''' ) == 10
        assert out.count('''there''' ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
    assert out.count('''a''' ) == 2
    assert out.count('''b''' ) == 2
    assert len(out ) == 4 | 628 | 0 |
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang ):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = F'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE_ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True )
    path = os.path.join(model_card_dir, '''README.md''' )
    print(F'Generating {path}' )
    with open(path, '''w''', encoding='''utf-8''' ) as f:
        f.write(SCREAMING_SNAKE_CASE_ )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang) | 703 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig ):
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']
    def __init__(self , fpn_feature_size = 2_56 , mask_feature_size = 2_56 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
        """simple docstring"""
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported )}' )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('''model_type''' ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported )}' )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
@classmethod
    def from_backbone_and_decoder_configs(cls , backbone_config , decoder_config , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict(self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output | 628 | 0 |
"""simple docstring"""
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = 'contact@muhammadumerfarooq.me'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    def __init__(self , domain ):
        """simple docstring"""
        super().__init__()
        self.urls = []
        self.domain = domain
    def handle_starttag(self , tag , attrs ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
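# Note (added): the parser above collects only unique, non-empty, non-'#' hrefs,
# resolving each against the start domain via urljoin.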
def get_domain_name(url ):
    return ".".join(get_sub_domain_name(url ).split('''.''' )[-2:] )
def get_sub_domain_name(url ):
    return parse.urlparse(url ).netloc
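# Hedged example (added): for "https://docs.github.com/en" these helpers return
# "docs.github.com" (netloc) and "github.com" (registrable domain).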
def _lowerCamelCase ( __a = "https://github.com" ):
SCREAMING_SNAKE_CASE_ = get_domain_name(__a )
# Initialize the parser
SCREAMING_SNAKE_CASE_ = Parser(__a )
try:
# Open URL
SCREAMING_SNAKE_CASE_ = requests.get(__a )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
SCREAMING_SNAKE_CASE_ = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
SCREAMING_SNAKE_CASE_ = requests.get(__a )
# Get the valid email.
SCREAMING_SNAKE_CASE_ = re.findall('''[a-zA-Z0-9]+@''' + domain, read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(__a )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(__a )
if __name__ == "__main__":
    emails = emails_from_url('https://github.com')
print(f'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails))) | 704 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name ):
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim, depths=depths, focal_levels=focal_levels, focal_windows=focal_windows, use_conv_embed=use_conv_embed, id2label=id2label, label2id=label2id, use_post_layernorm=use_post_layernorm, use_layerscale=use_layerscale, )
    return config
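# Hedged example (added), traced from the branches above: "focalnet-tiny-lrf" maps to
# depths [2, 2, 6, 2], embed_dim 96, focal_levels [3, 3, 3, 3], focal_windows [3, 3, 3, 3]
# and the 1k-label ImageNet id2label mapping.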
def rename_key(name ):
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''patch_embed.norm''', '''embeddings.norm''' )
if "layers" in name:
SCREAMING_SNAKE_CASE_ = '''encoder.''' + name
if "encoder.layers" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''encoder.layers''', '''encoder.stages''' )
if "downsample.proj" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''downsample.proj''', '''downsample.projection''' )
if "blocks" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''blocks''', '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.f''', '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.h''', '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''modulation.proj''', '''modulation.projection_out''' )
if name == "norm.weight":
SCREAMING_SNAKE_CASE_ = '''layernorm.weight'''
if name == "norm.bias":
SCREAMING_SNAKE_CASE_ = '''layernorm.bias'''
if "head" in name:
SCREAMING_SNAKE_CASE_ = name.replace('''head''', '''classifier''' )
else:
SCREAMING_SNAKE_CASE_ = '''focalnet.''' + name
return name
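# Hedged example (added), following the replacements above:
# rename_key("layers.0.blocks.1.modulation.f.weight")
# -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"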
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False ):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''', checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True, size={'''shortest_edge''': 256}, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=224, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, )
    image = Image.open(requests.get(url, stream=True ).raw )
    inputs = processor(images=image, return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1E-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''', model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''', outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
SCREAMING_SNAKE_CASE_ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3], __a, atol=1E-4 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model and processor of {model_name} to the hub...' )
        model.push_to_hub(F'{model_name}' )
        processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 628 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
@staticmethod
        def _lowercase (*args , **kwargs ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
"""simple docstring"""
        object_detector = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        examples = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self , object_detector , examples ):
        """simple docstring"""
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf(self ):
"""simple docstring"""
pass
@require_torch
    def test_small_model_pt(self ):
"""simple docstring"""
        object_detector = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        outputs = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
] , )
        outputs = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self ):
"""simple docstring"""
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf(self ):
"""simple docstring"""
pass
@require_torch
@slow
    def test_threshold(self ):
"""simple docstring"""
        threshold = 0.2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=threshold , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}},
] , )
@require_torch
@slow
    def test_top_k(self ):
"""simple docstring"""
        top_k = 2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=top_k , )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}},
] , ) | 705 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig ):
    model_type = '''glpn'''
    def __init__(self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 1_60, 2_56] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index | 628 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig ):
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__(self , vocab_size=5_02_57 , max_position_embeddings=20_48 , hidden_size=20_48 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=2_56 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , **kwargs , ):
"""simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
f'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@staticmethod
    def expand_attention_types_params(attention_types ):
        """simple docstring"""
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold(input, dimension, size, step ):
    # custom implementation of torch.Tensor.unfold, ONNX-export friendly
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step )
    min_length = torch.div(sizedim - size, step, rounding_mode='''floor''' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
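def _demo_custom_unfold():
    # Hedged sketch (added, not part of the original module): custom_unfold mirrors
    # torch.Tensor.unfold; a (1, 6) tensor unfolded on dim 1 with size=2, step=2
    # gives shape (1, 3, 2), matching x.unfold(1, 2, 2).
    import torch
    x = torch.arange(6 ).reshape(1, 6 )
    assert custom_unfold(x, 1, 2, 2 ).shape == (1, 3, 2)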
def custom_get_block_length_and_num_blocks(seq_length, window_size ):
    import torch
    candidates = torch.arange(1, window_size )
    remainders = torch.remainder(seq_length, candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='''floor''' )
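# Hedged example (added): seq_length=8, window_size=4 -> (2, 4): the largest divisor
# of 8 below 4 is 2, giving 4 blocks.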
class GPTNeoOnnxConfig(OnnxConfigWithPast ):
@property
    def inputs(self ):
        """simple docstring"""
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
    def num_attention_heads(self ):
"""simple docstring"""
return self._config.num_heads
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
def _lowercase (self ):
"""simple docstring"""
return 13 | 706 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    UpperCAmelCase__ = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) | 628 | 0 |
def _lowerCamelCase ( __a ):
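    """Sort `collection` in place with circle sort and return it: each pass
    compares elements from opposite ends of a segment toward the middle,
    swapping out-of-order pairs, recurses on both halves, and repeats until a
    full pass completes without a swap."""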
if len(__a ) < 2:
return collection
def circle_sort_util(__a, __a, __a ) -> bool:
SCREAMING_SNAKE_CASE_ = False
if low == high:
return swapped
SCREAMING_SNAKE_CASE_ = low
SCREAMING_SNAKE_CASE_ = high
while left < right:
if collection[left] > collection[right]:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
collection[right],
collection[left],
)
SCREAMING_SNAKE_CASE_ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
collection[right + 1],
collection[left],
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = low + int((high - low) / 2 )
SCREAMING_SNAKE_CASE_ = circle_sort_util(__a, __a, __a )
SCREAMING_SNAKE_CASE_ = circle_sort_util(__a, mid + 1, __a )
return swapped or left_swap or right_swap
SCREAMING_SNAKE_CASE_ = True
while is_not_sorted is True:
SCREAMING_SNAKE_CASE_ = circle_sort_util(__a, 0, len(__a ) - 1 )
return collection
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted)) | 707 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class snake_case ( __lowercase ):
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_="cls" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = project_dim
SCREAMING_SNAKE_CASE_ = pooler_fn
SCREAMING_SNAKE_CASE_ = learn_encoder
SCREAMING_SNAKE_CASE_ = use_attention_mask
class snake_case ( __lowercase ):
UpperCAmelCase__ = [R'''pooler''', R'''logit_scale''']
UpperCAmelCase__ = [R'''position_ids''', R'''predictions.decoder.bias''']
UpperCAmelCase__ = '''roberta'''
UpperCAmelCase__ = RobertaSeriesConfig
def __init__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , '''has_pre_transformation''' , SCREAMING_SNAKE_CASE_ )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE_ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _lowercase (self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.base_model(
input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , position_ids=SCREAMING_SNAKE_CASE_ , head_mask=SCREAMING_SNAKE_CASE_ , inputs_embeds=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=SCREAMING_SNAKE_CASE_ , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE_ = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE_ = self.pre_LN(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.transformation_pre(SCREAMING_SNAKE_CASE_ )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE_ = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=SCREAMING_SNAKE_CASE_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 628 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
lowerCAmelCase__ = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 708 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCamelCase ( __a, __a=0.9_9_9, __a="cosine", ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a ):
return math.exp(t * -1_2.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
SCREAMING_SNAKE_CASE_ = []
for i in range(__a ):
SCREAMING_SNAKE_CASE_ = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ), __a ) )
return torch.tensor(__a, dtype=torch.floataa )
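# Self-contained sketch of the cosine ("squaredcos_cap_v2") branch above,
# following Nichol & Dhariwal (2021); max_beta=0.999 matches the clamp applied
# by `min(..., max_beta)` in the loop.
def _cosine_betas_example(num_diffusion_timesteps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)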
class snake_case ( __lowercase , __lowercase ):
UpperCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps | 628 | 0 |
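# Numerically equivalent, de-obfuscated sketch of `_convert_to_karras` above,
# i.e. Eq. (5) of Karras et al. (2022) with rho = 7.0 as used in the method.
def _karras_sigmas_example(sigma_min, sigma_max, num_inference_steps, rho=7.0):
    ramp = np.linspace(0, 1, num_inference_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho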
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class snake_case ( __lowercase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=SCREAMING_SNAKE_CASE_ , generator_tokenizer=SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , init_retrieval=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = None
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
SCREAMING_SNAKE_CASE_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
SCREAMING_SNAKE_CASE_ = str(distributed_port + 1 )
SCREAMING_SNAKE_CASE_ = dist.new_group(ranks=SCREAMING_SNAKE_CASE_ , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _lowercase (self ):
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=torch.floataa ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.empty(SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
dist.scatter(SCREAMING_SNAKE_CASE_ , src=0 , scatter_list=SCREAMING_SNAKE_CASE_ , group=self.process_group )
return target_tensor
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
SCREAMING_SNAKE_CASE_ = next((addr for addr in addrs if addr.startswith('''e''' )) , SCREAMING_SNAKE_CASE_ )
return ifname
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if not dist.is_initialized():
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self._main_retrieve(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ )
# distributed training
SCREAMING_SNAKE_CASE_ = dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE_ = None
if self._is_main():
SCREAMING_SNAKE_CASE_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(SCREAMING_SNAKE_CASE_ )]
dist.gather(torch.tensor(SCREAMING_SNAKE_CASE_ ) , dst=0 , gather_list=SCREAMING_SNAKE_CASE_ , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE_ = question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
if self._is_main():
assert len(SCREAMING_SNAKE_CASE_ ) == world_size
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self._main_retrieve(torch.cat(SCREAMING_SNAKE_CASE_ ).numpy() , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ ), torch.tensor(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._chunk_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE_ = self._scattered(SCREAMING_SNAKE_CASE_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(SCREAMING_SNAKE_CASE_ ) | 709 |
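# Hedged summary of the collective pattern above: every rank sends its query
# hidden states to rank 0 (dist.gather), rank 0 runs retrieval once over the
# concatenated batch, and equally sized chunks of doc ids and doc embeddings
# are then sent back to each rank (dist.scatter) over the dedicated "gloo" group.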
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class snake_case ( __lowercase ):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
SCREAMING_SNAKE_CASE_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self ):
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def _lowercase (self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''dataset''' )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
SCREAMING_SNAKE_CASE_ = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
SCREAMING_SNAKE_CASE_ = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
SCREAMING_SNAKE_CASE_ = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
SCREAMING_SNAKE_CASE_ = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_canonical_hf_index_retriever()
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
SCREAMING_SNAKE_CASE_ = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_dpr_ctx_encoder_tokenizer()
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = [[5, 7], [10, 11]]
SCREAMING_SNAKE_CASE_ = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
SCREAMING_SNAKE_CASE_ = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
            len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary. | 628 | 0 |
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def _lowerCamelCase ( __a, __a, __a=1E-12 ):
SCREAMING_SNAKE_CASE_ = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(__a, axis=1 ), a_min=__a ) ).T
SCREAMING_SNAKE_CASE_ = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(__a, axis=1 ), a_min=__a ) ).T
return jnp.matmul(__a, norm_emb_a.T )
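# De-obfuscated sketch of the helper above (argument names assumed): it
# L2-normalizes both embedding batches and returns their pairwise cosine
# similarity matrix.
def _cosine_similarity_example(emb_1, emb_2, eps=1e-12):
    norm_1 = emb_1 / jnp.maximum(jnp.linalg.norm(emb_1, axis=1, keepdims=True), eps)
    norm_2 = emb_2 / jnp.maximum(jnp.linalg.norm(emb_2, axis=1, keepdims=True), eps)
    return jnp.matmul(norm_1, norm_2.T)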
class snake_case ( nn.Module ):
UpperCAmelCase__ = 42
UpperCAmelCase__ = jnp.floataa
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModule(self.config.vision_config )
SCREAMING_SNAKE_CASE_ = nn.Dense(self.config.projection_dim , use_bias=SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
SCREAMING_SNAKE_CASE_ = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
SCREAMING_SNAKE_CASE_ = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
SCREAMING_SNAKE_CASE_ = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
SCREAMING_SNAKE_CASE_ = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__(self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.vision_model(SCREAMING_SNAKE_CASE_ )[1]
SCREAMING_SNAKE_CASE_ = self.visual_projection(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = jax_cosine_distance(SCREAMING_SNAKE_CASE_ , self.special_care_embeds )
SCREAMING_SNAKE_CASE_ = jax_cosine_distance(SCREAMING_SNAKE_CASE_ , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
SCREAMING_SNAKE_CASE_ = 0.0
SCREAMING_SNAKE_CASE_ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
SCREAMING_SNAKE_CASE_ = jnp.round(SCREAMING_SNAKE_CASE_ , 3 )
SCREAMING_SNAKE_CASE_ = jnp.any(special_scores > 0 , axis=1 , keepdims=SCREAMING_SNAKE_CASE_ )
# Use a lower threshold if an image has any special care concept
SCREAMING_SNAKE_CASE_ = is_special_care * 0.01
SCREAMING_SNAKE_CASE_ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
SCREAMING_SNAKE_CASE_ = jnp.round(SCREAMING_SNAKE_CASE_ , 3 )
SCREAMING_SNAKE_CASE_ = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class snake_case ( __lowercase ):
UpperCAmelCase__ = CLIPConfig
UpperCAmelCase__ = '''clip_input'''
UpperCAmelCase__ = FlaxStableDiffusionSafetyCheckerModule
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = jnp.floataa , SCREAMING_SNAKE_CASE_ = True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if input_shape is None:
SCREAMING_SNAKE_CASE_ = (1, 2_24, 2_24, 3)
SCREAMING_SNAKE_CASE_ = self.module_class(config=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_shape=SCREAMING_SNAKE_CASE_ , seed=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , _do_init=_do_init )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = jax.random.normal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = jax.random.split(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {'''params''': params_rng, '''dropout''': dropout_rng}
SCREAMING_SNAKE_CASE_ = self.module.init(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )['''params''']
return random_params
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = jnp.transpose(SCREAMING_SNAKE_CASE_ , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.floataa ) , rngs={} , ) | 710 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = start
SCREAMING_SNAKE_CASE_ = end
SCREAMING_SNAKE_CASE_ = val
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = left
SCREAMING_SNAKE_CASE_ = right
def __repr__(self ):
"""simple docstring"""
return f'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class snake_case :
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = collection
SCREAMING_SNAKE_CASE_ = function
if self.collection:
SCREAMING_SNAKE_CASE_ = self._build_tree(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
self._update_tree(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return self._query_range(self.root , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.collection[start] )
SCREAMING_SNAKE_CASE_ = (start + end) // 2
SCREAMING_SNAKE_CASE_ = self._build_tree(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE_ )
return SegmentTreeNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ = val
return
if i <= node.mid:
self._update_tree(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
self._update_tree(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.fn(node.left.val , node.right.val )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , SCREAMING_SNAKE_CASE_ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE_ ) , )
else:
# range in right child tree
return self._query_range(node.right , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
lowerCAmelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print() | 628 | 0 |
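# Complexity note for the segment tree above: `_build_tree` is O(n), `update`
# and `query_range` run in O(log n), and `traverse` yields nodes in
# breadth-first order, as the demo output shows.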
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
lowerCAmelCase__ = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def _lowerCamelCase ( __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
return (preds == labels).mean()
def _lowerCamelCase ( __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
SCREAMING_SNAKE_CASE_ = simple_accuracy(__a, __a )
SCREAMING_SNAKE_CASE_ = fa_score(y_true=__a, y_pred=__a )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _lowerCamelCase ( __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
SCREAMING_SNAKE_CASE_ = pearsonr(__a, __a )[0]
SCREAMING_SNAKE_CASE_ = spearmanr(__a, __a )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _lowerCamelCase ( __a, __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
assert len(__a ) == len(__a ), F'Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}'
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a, __a )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a, __a )}
elif task_name == "mrpc":
return acc_and_fa(__a, __a )
elif task_name == "sts-b":
return pearson_and_spearman(__a, __a )
elif task_name == "qqp":
return acc_and_fa(__a, __a )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a, __a )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a, __a )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a, __a )}
elif task_name == "rte":
return {"acc": simple_accuracy(__a, __a )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a, __a )}
elif task_name == "hans":
return {"acc": simple_accuracy(__a, __a )}
else:
raise KeyError(__a )
def _lowerCamelCase ( __a, __a, __a ):
'''simple docstring'''
warnings.warn(__a, __a )
requires_backends(__a, '''sklearn''' )
if len(__a ) != len(__a ):
raise ValueError(F'Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}' )
if task_name == "xnli":
return {"acc": simple_accuracy(__a, __a )}
else:
raise KeyError(__a ) | 711 |
"""simple docstring"""
def _lowerCamelCase ( __a ):
if not isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be an integer'
raise TypeError(__a )
if number < 1:
SCREAMING_SNAKE_CASE_ = F'Input value of [number={number}] must be > 0'
raise ValueError(__a )
SCREAMING_SNAKE_CASE_ = 1
for i in range(1, __a ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod() | 628 | 0 |
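The loop above is the Catalan recurrence C(i) = C(i-1) * (4i - 2) / (i + 1), so the function returns the (number-1)-th Catalan number: 1, 1, 2, 5, 14, ... A cross-check against the closed form, as a sketch:

from math import comb

def catalan(k: int) -> int:
    # Closed form: C(k) = (2k choose k) / (k + 1)
    return comb(2 * k, k) // (k + 1)

assert [catalan(k) for k in range(6)] == [1, 1, 2, 5, 14, 42]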
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 712 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(__a ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
SCREAMING_SNAKE_CASE_ = i + 1
else:
SCREAMING_SNAKE_CASE_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''') | 628 | 0 |
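Note that the two-pointer scan above is only correct when the input is sorted ascending, as in the `[2, 7, 11, 15]` demo. For unsorted input the usual alternative is a one-pass hash map; a sketch (names illustrative):

def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    # Remember each value's index and look for the complement as we go.
    seen: dict[int, int] = {}
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []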
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a, __a ):
return (preds == labels).mean()
@dataclass
class snake_case :
UpperCAmelCase__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class snake_case :
UpperCAmelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCAmelCase__ = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCAmelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase__ = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _lowerCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ = processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
SCREAMING_SNAKE_CASE_ = len(__a )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=__a, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__a, cache_dir=model_args.cache_dir, )
# Get datasets
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir, tokenizer=__a, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def compute_metrics(__a ) -> Dict:
SCREAMING_SNAKE_CASE_ = np.argmax(p.predictions, axis=1 )
return {"acc": simple_accuracy(__a, p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ = DataCollatorWithPadding(__a, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ = Trainer(
model=__a, args=__a, train_dataset=__a, eval_dataset=__a, compute_metrics=__a, data_collator=__a, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ = trainer.evaluate()
SCREAMING_SNAKE_CASE_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
if trainer.is_world_master():
with open(__a, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''', __a, __a )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__a )
return results
def _lowerCamelCase ( __a ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 713 |
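The script above hangs off `HfArgumentParser`, which maps CLI flags onto dataclass fields and returns one instance per dataclass. A toy sketch of the same pattern (the dataclass is invented for illustration; `parse_args_into_dataclasses` is the call used above):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    task_name: str = field(metadata={"help": "Which task to run."})
    max_seq_length: int = field(default=128, metadata={"help": "Truncation length."})

parser = HfArgumentParser(ToyArguments)
(toy_args,) = parser.parse_args_into_dataclasses()  # reads sys.argv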
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowerCAmelCase__ = logging.getLogger(__name__)
def _lowerCamelCase ( __a=2, __a=3, __a=16, __a = 10, __a = 2 ):
def get_dataset(__a ):
SCREAMING_SNAKE_CASE_ = torch.randn(batch_size * n_batches, 1 )
return TensorDataset(__a, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1 ) )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = get_dataset(__a )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
SCREAMING_SNAKE_CASE_ = DataLoader(__a, shuffle=__a, batch_size=__a, num_workers=4 )
return (train_dataloader, valid_dataloader)
def _lowerCamelCase ( __a, __a, __a, __a, __a, __a=None ):
SCREAMING_SNAKE_CASE_ = []
for epoch in range(__a ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = batch
SCREAMING_SNAKE_CASE_ = model(__a )
SCREAMING_SNAKE_CASE_ = torch.nn.functional.mse_loss(__a, __a )
accelerator.backward(__a )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class snake_case ( nn.Module ):
def __init__(self ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.randn(1 ) )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
return x * self.a + self.b
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = Accelerator()
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
SCREAMING_SNAKE_CASE_ = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((SCREAMING_SNAKE_CASE_) ,(SCREAMING_SNAKE_CASE_)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE_ = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE_ = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE_ = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_ = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = dummy_dataloaders()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE_ = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def _lowercase (self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE_ = DummyModel()
SCREAMING_SNAKE_CASE_ = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE_ = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints should survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCAmelCase__ = '/tmp/accelerate/state_checkpointing'
lowerCAmelCase__ = DummyModel()
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowerCAmelCase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowerCAmelCase__, lowerCAmelCase__ = dummy_dataloaders()
lowerCAmelCase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowerCAmelCase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowerCAmelCase__, lowerCAmelCase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer state is loaded on the GPU
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
lowerCAmelCase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
lowerCAmelCase__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 628 | 0 |
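Everything above revolves around `Accelerator.save_state` / `load_state` snapshotting model, optimizer, scheduler, and RNG state. A condensed round trip, as a sketch assuming a single-process CPU run (path and model are illustrative):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("ckpt")           # snapshot weights, optimizer, RNG
before = model.weight.detach().clone()
with torch.no_grad():
    model.weight.add_(1.0)               # perturb the weights
accelerator.load_state("ckpt")           # restore the snapshot
assert torch.allclose(model.weight, before)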
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger('transformers.models.speecht5')
def _lowerCamelCase ( __a, __a, __a ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_ = checkpoint['''input_conv.weight_g''']
SCREAMING_SNAKE_CASE_ = checkpoint['''input_conv.weight_v''']
SCREAMING_SNAKE_CASE_ = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_ = checkpoint[F'upsamples.{i}.1.weight_g']
SCREAMING_SNAKE_CASE_ = checkpoint[F'upsamples.{i}.1.weight_v']
SCREAMING_SNAKE_CASE_ = checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
SCREAMING_SNAKE_CASE_ = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
SCREAMING_SNAKE_CASE_ = checkpoint['''output_conv.1.weight_g''']
SCREAMING_SNAKE_CASE_ = checkpoint['''output_conv.1.weight_v''']
SCREAMING_SNAKE_CASE_ = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def _lowerCamelCase ( __a, __a, __a, __a=None, __a=None, ):
if config_path is not None:
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGanConfig.from_pretrained(__a )
else:
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_ = SpeechTaHifiGan(__a )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
load_weights(orig_checkpoint['''model''']['''generator'''], __a, __a )
SCREAMING_SNAKE_CASE_ = np.load(__a )
SCREAMING_SNAKE_CASE_ = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_ = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a ).float()
SCREAMING_SNAKE_CASE_ = torch.from_numpy(__a ).float()
model.save_pretrained(__a )
if repo_id:
print('''Pushing to the hub...''' )
model.push_to_hub(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 714 |
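The `weight_g` / `weight_v` keys being copied above come from PyTorch weight normalization, which factors a weight into a magnitude g and a direction v; `apply_weight_norm()` gives the HF model the same parameter layout before copying, and `remove_weight_norm()` fuses them back. Where those names come from, in miniature:

import torch
from torch.nn.utils import weight_norm

conv = weight_norm(torch.nn.Conv1d(4, 4, kernel_size=3))
print(sorted(name for name, _ in conv.named_parameters()))
# ['bias', 'weight_g', 'weight_v'] -- the decomposed parameters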
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowercase ):
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).prev_sample
return sample
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _lowercase (self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE_ , prediction_type=SCREAMING_SNAKE_CASE_ , sample_max_value=SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ = samplea.shape[0]
SCREAMING_SNAKE_CASE_ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop()
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3 | 628 | 0 |
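The `full_loop` helper above is the standard diffusers sampling skeleton: configure the scheduler, call `set_timesteps`, then repeatedly feed the model output to `scheduler.step`. Stripped to its core (the zero "model" below is a stand-in, not a real denoiser):

import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for a denoising model
    sample = scheduler.step(noise_pred, t, sample, eta=0.0).prev_sample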
"""simple docstring"""
def _lowerCamelCase ( __a = 10 ):
if not isinstance(__a, __a ) or n < 0:
raise ValueError('''Invalid input''' )
SCREAMING_SNAKE_CASE_ = 10**n
SCREAMING_SNAKE_CASE_ = 28_433 * (pow(2, 7_830_457, __a )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 715 |
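The trick above is Python's three-argument `pow`, which performs modular exponentiation without ever materializing the roughly 2.4-million-digit power (this is Project Euler 97). One caveat: converting `number % modulus` with plain `str` drops leading zeros, so a defensive version pads the result. A sketch:

def last_digits(n: int = 10) -> str:
    # Last n digits of 28433 * 2**7830457 + 1, via modular exponentiation.
    modulus = 10**n
    value = (28_433 * pow(2, 7_830_457, modulus) + 1) % modulus
    return str(value).zfill(n)  # keep leading zeros, unlike plain str()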
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = TransfoXLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''<unk> UNwanted , running'''
SCREAMING_SNAKE_CASE_ = '''<unk> unwanted, running'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [0, 4, 8, 7] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TransfoXLTokenizer(lower_case=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
SCREAMING_SNAKE_CASE_ = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE_ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' ) | 628 | 0 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
lowerCAmelCase__ = 299792458
# Symbols
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = symbols('ct x y z')
def _lowerCamelCase ( __a ):
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
        # Velocities are expected to be of order c; reject anything below 1 m/s as an input error
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def _lowerCamelCase ( __a ):
return 1 / sqrt(1 - beta(__a ) ** 2 )
def _lowerCamelCase ( __a ):
return np.array(
[
[gamma(__a ), -gamma(__a ) * beta(__a ), 0, 0],
[-gamma(__a ) * beta(__a ), gamma(__a ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def _lowerCamelCase ( __a, __a = None ):
    # With no event supplied, fall back to the symbolic four-vector (ct, x, y, z)
if event is None:
SCREAMING_SNAKE_CASE_ = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(__a ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCAmelCase__ = transform(29979245)
print('Example of four vector: ')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
lowerCAmelCase__ = {ct: c, x: 1, y: 1, z: 1}
lowerCAmelCase__ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''') | 716 |
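A standard check on a boost matrix like the one above is invariance of the spacetime interval (ct)^2 - x^2 - y^2 - z^2. A self-contained numerical sketch with the same beta/gamma definitions and an illustrative velocity of 0.5c:

import numpy as np

C = 299_792_458.0
v = 0.5 * C
b = v / C
g = 1.0 / np.sqrt(1.0 - b**2)
boost = np.array(
    [[g, -g * b, 0.0, 0.0], [-g * b, g, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
)
event = np.array([2.0, 1.0, 0.5, 0.25])  # (ct, x, y, z)
metric = np.diag([1.0, -1.0, -1.0, -1.0])
interval = lambda e: float(e @ metric @ e)
assert np.isclose(interval(event), interval(boost @ event))  # interval is invariant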
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__a, max_perimeter + 1 ):
SCREAMING_SNAKE_CASE_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__a ):
SCREAMING_SNAKE_CASE_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _lowerCamelCase ( __a = 1_000 ):
SCREAMING_SNAKE_CASE_ = pythagorean_triple(__a )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''') | 628 | 0 |
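This is Project Euler 39: count the integer right triangles per perimeter and report the perimeter with the most. The classic small case is perimeter 120, which admits exactly three triples; a brute-force confirmation:

# Perimeter 120 admits exactly three integer right triangles.
triples = {
    (a, b, 120 - a - b)
    for a in range(1, 120)
    for b in range(a, 120 - a)
    if a * a + b * b == (120 - a - b) ** 2
}
assert triples == {(20, 48, 52), (24, 45, 51), (30, 40, 50)}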
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case ( unittest.TestCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = size if size is not None else {'''shortest_edge''': 18}
SCREAMING_SNAKE_CASE_ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = min_resolution
SCREAMING_SNAKE_CASE_ = max_resolution
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = do_center_crop
SCREAMING_SNAKE_CASE_ = crop_size
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = image_mean
SCREAMING_SNAKE_CASE_ = image_std
def _lowercase (self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = LevitImageProcessor if is_vision_available() else None
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = LevitImageProcessingTester(self )
@property
def _lowercase (self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_mean''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''image_std''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''size''' ) )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
SCREAMING_SNAKE_CASE_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 717 |
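The three tests above run identical shape assertions for PIL, NumPy, and PyTorch inputs; the processor call itself is the same in all cases. A minimal usage sketch (configuration values mirror the tester defaults above):

import numpy as np
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = np.random.randint(0, 256, size=(3, 30, 30), dtype=np.uint8)
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])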
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def _lowerCamelCase ( __a ):
if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(__a, '''_dynamo''' ):
return False
return isinstance(__a, torch._dynamo.eval_frame.OptimizedModule )
def _lowerCamelCase ( __a, __a = True ):
SCREAMING_SNAKE_CASE_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ = is_compiled_module(__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ = getattr(__a, '''forward''' )
SCREAMING_SNAKE_CASE_ = model.__dict__.pop('''_original_forward''', __a )
if original_forward is not None:
while hasattr(__a, '''__wrapped__''' ):
SCREAMING_SNAKE_CASE_ = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ = forward
if getattr(__a, '''_converted_to_transformer_engine''', __a ):
convert_model(__a, to_transformer_engine=__a )
if is_compiled:
SCREAMING_SNAKE_CASE_ = model
SCREAMING_SNAKE_CASE_ = compiled_model
return model
def _lowerCamelCase ( ):
PartialState().wait_for_everyone()
def _lowerCamelCase ( __a, __a ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(__a, __a )
elif PartialState().local_process_index == 0:
torch.save(__a, __a )
@contextmanager
def _lowerCamelCase ( **__a ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ = str(__a )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def _lowerCamelCase ( __a ):
if not hasattr(__a, '''__qualname__''' ) and not hasattr(__a, '''__name__''' ):
SCREAMING_SNAKE_CASE_ = getattr(__a, '''__class__''', __a )
if hasattr(__a, '''__qualname__''' ):
return obj.__qualname__
if hasattr(__a, '''__name__''' ):
return obj.__name__
return str(__a )
def _lowerCamelCase ( __a, __a ):
for key, value in source.items():
if isinstance(__a, __a ):
SCREAMING_SNAKE_CASE_ = destination.setdefault(__a, {} )
merge_dicts(__a, __a )
else:
SCREAMING_SNAKE_CASE_ = value
return destination
def _lowerCamelCase ( __a = None ):
if port is None:
SCREAMING_SNAKE_CASE_ = 29_500
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0 | 628 | 0 |
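The recursive merge defined near the end above folds `source` into `destination`, recursing on nested dicts instead of overwriting them wholesale. The behavior, isolated into a local copy of the same logic:

def merge_dicts(source: dict, destination: dict) -> dict:
    # Nested dicts are merged key by key; scalar values overwrite.
    for key, value in source.items():
        if isinstance(value, dict):
            merge_dicts(value, destination.setdefault(key, {}))
        else:
            destination[key] = value
    return destination

assert merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) == {"a": {"y": 2, "x": 1}, "b": 3}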
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase__ = 'docs/source/en/_toctree.yml'
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = defaultdict(__a )
for doc in model_doc:
counts[doc["local"]] += 1
SCREAMING_SNAKE_CASE_ = [key for key, value in counts.items() if value > 1]
SCREAMING_SNAKE_CASE_ = []
for duplicate_key in duplicates:
SCREAMING_SNAKE_CASE_ = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__a ) > 1:
raise ValueError(
F'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
    # Add back the keys that were not duplicated
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(__a, key=lambda s: s["title"].lower() )
def _lowerCamelCase ( __a=False ):
with open(__a, encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ = yaml.safe_load(f.read() )
# Get to the API doc
SCREAMING_SNAKE_CASE_ = 0
while content[api_idx]["title"] != "API":
api_idx += 1
SCREAMING_SNAKE_CASE_ = content[api_idx]['''sections''']
# Then to the model doc
SCREAMING_SNAKE_CASE_ = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
SCREAMING_SNAKE_CASE_ = api_doc[model_idx]['''sections''']
SCREAMING_SNAKE_CASE_ = [(idx, section) for idx, section in enumerate(__a ) if '''sections''' in section]
SCREAMING_SNAKE_CASE_ = False
for idx, modality_doc in modalities_docs:
SCREAMING_SNAKE_CASE_ = modality_doc['''sections''']
SCREAMING_SNAKE_CASE_ = clean_model_doc_toc(__a )
if old_modality_doc != new_modality_doc:
SCREAMING_SNAKE_CASE_ = True
if overwrite:
SCREAMING_SNAKE_CASE_ = new_modality_doc
if diff:
if overwrite:
SCREAMING_SNAKE_CASE_ = model_doc
SCREAMING_SNAKE_CASE_ = api_doc
with open(__a, '''w''', encoding='''utf-8''' ) as f:
f.write(yaml.dump(__a, allow_unicode=__a ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCAmelCase__ = parser.parse_args()
check_model_doc(args.fix_and_overwrite) | 718 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = CTRLTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def _lowercase (self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
SCREAMING_SNAKE_CASE_ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE_ = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
return input_text, output_text
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ = '''adapt react readapt apt'''
SCREAMING_SNAKE_CASE_ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) | 628 | 0 |
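The vocabulary and merges above follow the fastBPE convention used by CTRL: a trailing `@@` marks a subword that continues into the next token, so detokenization is a join followed by deleting the marker. In miniature:

tokens = ["adapt", "re@@", "a@@", "c@@", "t", "re@@", "adapt", "apt"]
assert " ".join(tokens).replace("@@ ", "") == "adapt react readapt apt"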
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
lowerCAmelCase__ = random.Random()
def _lowerCamelCase ( __a, __a=1.0, __a=None, __a=None ):
if rng is None:
SCREAMING_SNAKE_CASE_ = global_rng
SCREAMING_SNAKE_CASE_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class snake_case ( unittest.TestCase ):
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=20_00 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1_60_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = min_seq_length
SCREAMING_SNAKE_CASE_ = max_seq_length
SCREAMING_SNAKE_CASE_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE_ = feature_size
SCREAMING_SNAKE_CASE_ = num_mel_bins
SCREAMING_SNAKE_CASE_ = padding_value
SCREAMING_SNAKE_CASE_ = sampling_rate
SCREAMING_SNAKE_CASE_ = return_attention_mask
SCREAMING_SNAKE_CASE_ = do_normalize
def _lowercase (self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowercase (self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
"""simple docstring"""
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
SCREAMING_SNAKE_CASE_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE_ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = SpeechaTextFeatureExtractor if is_speech_available() else None
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance(self, input_vector):
        """simple docstring"""
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors='np').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors='np', return_attention_mask=True)
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0]:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1]:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='max_length', max_length=4, truncation=True, return_tensors='np', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='longest', max_length=4, truncation=True, return_tensors='np', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding='longest', max_length=16, truncation=True, return_tensors='np', return_attention_mask=True, )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        """simple docstring"""
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        """simple docstring"""
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        """simple docstring"""
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors='pt').input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
) | 628 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mobilenet_v2': [
        'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileNetV2Config',
        'MobileNetV2OnnxConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead',
        FutureWarning, )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler
    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, image, mask_image, num_inference_steps=250, eta=0.0, jump_length=10, jump_n_sample=10, generator=None, output_type="pil", return_dict=True, ):
        """simple docstring"""
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_euler')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        """simple docstring"""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('sample_dpmpp_2m')
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=True, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys())} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key, value in result.items():
                    logger.info('  %s = %s', key, value)
                    writer.write('%s = %s\n' % (key, value))
            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 628 | 0 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main():
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
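# Illustrative usage (not part of the original module; assumes torch is installed):
#   import torch
#   pil_images = pt_to_pil(torch.zeros(2, 3, 8, 8))   # 2-image batch in [-1, 1]
#   assert len(pil_images) == 2 and pil_images[0].size == (8, 8)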
| 629 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
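# Illustrative usage (not part of the original module): enforce one pinned dependency
# at runtime; this raises if the installed version violates the pin recorded in `deps`.
#   dep_version_check("tqdm")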
| 629 |
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""")
    return F"""{filename_prefix_for_name(name)}-{split}"""
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F""".{filetype_suffix}"""
    filepath = os.path.join(data_dir, prefix)
    return F"""{filepath}*"""
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F""".{filetype_suffix}""" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F""".{filetype_suffix}"""
        return [filename]
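# Illustrative usage (hypothetical names and paths, derived from the functions above):
#   filepattern_for_dataset_split("my_dataset", "train", "/data", filetype_suffix="arrow")
#   -> "/data/my_dataset-train.arrow*"
#   filenames_for_dataset_split("/data", "my_dataset", "train", "arrow", shard_lengths=[100, 100])
#   -> ["/data/my_dataset-train-00000-of-00002.arrow", "/data/my_dataset-train-00001-of-00002.arrow"]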
| 629 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        """simple docstring"""
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
    @cached_property
    def default_tokenizer_fast(self):
        """simple docstring"""
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        """simple docstring"""
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        """simple docstring"""
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        """simple docstring"""
        pass
    def test_embeded_special_tokens(self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 | 1 |
import operator
def _A ( lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None ):
a__ : List[str] = operator.lt if reverse else operator.gt
a__ : str = solution or []
if not arr:
return solution
a__ : Union[str, Any] = [arr.pop(0 )]
for i, item in enumerate(lowerCamelCase ):
if _operator(lowerCamelCase , sublist[-1] ):
sublist.append(lowerCamelCase )
arr.pop(lowerCamelCase )
# merging sublist into solution list
if not solution:
solution.extend(lowerCamelCase )
else:
while sublist:
a__ : Dict = sublist.pop(0 )
for i, xx in enumerate(lowerCamelCase ):
if not _operator(lowerCamelCase , lowerCamelCase ):
solution.insert(lowerCamelCase , lowerCamelCase )
break
else:
solution.append(lowerCamelCase )
strand_sort(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 629 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
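# Illustrative note (hypothetical key, not from the original script): the (tf_name, hf_name)
# pairs are applied in order, so "/" -> ".", "layer_" -> "layers.", "kernel" -> "weight" and
# "pegasus" -> "model" all fire on a raw TF variable name before it is looked up in the
# PyTorch state dict below.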
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}
    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        """simple docstring"""
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)
    def test_exact_match_arg(self):
        """simple docstring"""
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
    def test_exact_match_arg_remote(self):
        """simple docstring"""
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
    def test_exact_match_kwarg(self):
        """simple docstring"""
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
    def test_exact_match_kwarg_remote(self):
        """simple docstring"""
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 629 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that returns the wall-clock duration of `func` instead of its result."""
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""")
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
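# Illustrative usage (hypothetical path and feature spec, not part of the original utilities):
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)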
| 629 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        """simple docstring"""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        """simple docstring"""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """simple docstring"""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
a__ : List[str] = self.get_env()
a__ : Union[str, Any] = "1"
a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Any = "\nfrom transformers import AutoModel\n "
a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__ : Optional[Any] = self.get_env()
a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Dict = "1"
a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
a__ : List[str] = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
a__ : Dict = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
return name.lower()
def _A ( lowerCamelCase ):
a__ : Tuple = _single_underscore_re.split(lowerCamelCase )
a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" )
def _A ( lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCamelCase ):
raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCamelCase )}-{split}"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
a__ : Union[str, Any] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
a__ : Any = os.path.join(lowerCamelCase , lowerCamelCase )
return F"""{filepath}*"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
a__ : List[str] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if shard_lengths:
a__ : List[str] = len(lowerCamelCase )
a__ : str = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase )]
if filetype_suffix:
a__ : Optional[Any] = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
a__ : Optional[int] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
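# Standalone demo of the camelCase <-> snake_case regexes defined above
# (copied here with readable names; expected outputs derived from the code):
import re

_uc_uc = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lc_uc = re.compile(r"([a-z\d])([A-Z])")

def demo_camelcase_to_snakecase(name):
    name = _uc_uc.sub(r"\1_\2", name)  # split runs of capitals: HTMLParser -> HTML_Parser
    name = _lc_uc.sub(r"\1_\2", name)  # split lower->upper boundaries: CamelCase -> Camel_Case
    return name.lower()

print(demo_camelcase_to_snakecase("CamelCaseName"))  # camel_case_name
print(demo_camelcase_to_snakecase("HTMLParser"))     # html_parser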
| 629 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 629 | 1 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
class __lowerCAmelCase :
_UpperCamelCase : str = None
@experimental
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
return _map_with_joblib(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Any = num_proc if num_proc <= len(lowerCamelCase ) else len(lowerCamelCase )
a__ : Any = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowerCamelCase ):
a__ : int = len(lowerCamelCase ) // num_proc
a__ : Optional[int] = len(lowerCamelCase ) % num_proc
a__ : List[Any] = div * index + min(lowerCamelCase , lowerCamelCase )
a__ : Union[str, Any] = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowerCamelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(lowerCamelCase )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(lowerCamelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
a__ , a__ : str = None, None
if not disable_tqdm:
a__ , a__ : int = (RLock(),), tqdm.set_lock
with Pool(lowerCamelCase , initargs=lowerCamelCase , initializer=lowerCamelCase ) as pool:
a__ : List[str] = pool.map(lowerCamelCase , lowerCamelCase )
logger.info(F"""Finished {num_proc} processes""" )
a__ : List[Any] = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(lowerCamelCase )} objects""" )
return mapped
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowerCamelCase ):
return joblib.Parallel()(
joblib.delayed(lowerCamelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def _A ( lowerCamelCase ):
a__ : Dict = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
a__ : Union[str, Any] = None
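# A self-contained sketch (hypothetical helper names) of the contiguous-split
# scheme used above: divide the iterable into num_proc slices whose lengths
# differ by at most one, then map the chunks in a multiprocessing Pool.
from multiprocessing import Pool

def _square_chunk(chunk):
    return [x * x for x in chunk]

def contiguous_splits(items, num_proc):
    div, mod = divmod(len(items), num_proc)
    return [
        items[div * i + min(i, mod) : div * (i + 1) + min(i + 1, mod)]
        for i in range(num_proc)
    ]

if __name__ == "__main__":
    chunks = contiguous_splits(list(range(10)), 3)  # [[0..3], [4..6], [7..9]]
    with Pool(3) as pool:
        mapped = pool.map(_square_chunk, chunks)
    print([y for part in mapped for y in part])  # flattened, order preserved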
| 629 |
from PIL import Image
def change_brightness(img , level ):
def brightness(c ) -> float:
# img.point() calls this once per pixel value; the result is simply c + level
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
SCREAMING_SNAKE_CASE__.save("""image_data/lena_brightness.png""", format="""png""")
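# Quick sanity check (assumes PIL's Image import above; no image file needed):
# every pixel of a flat grayscale image shifts by +50.
test_img = Image.new("L", (2, 2), color=100)
print(list(change_brightness(test_img, 50).getdata()))  # [150, 150, 150, 150]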
| 629 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """bert-base-cased"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """google/pegasus-xsum"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [""" Sam ate lunch today.""", """Sams lunch ingredients."""]
SCREAMING_SNAKE_CASE__ : Dict = ["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
SCREAMING_SNAKE_CASE__ : int = """patrickvonplaten/t5-tiny-random"""
SCREAMING_SNAKE_CASE__ : str = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """sshleifer/tiny-mbart"""
SCREAMING_SNAKE_CASE__ : str = """sshleifer/tiny-marian-en-de"""
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Union[str, Any] = "\n".join(lowerCamelCase )
Path(lowerCamelCase ).open("w" ).writelines(lowerCamelCase )
def _A ( lowerCamelCase ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCamelCase , F"""{split}.source""" ) , lowerCamelCase )
_dump_articles(os.path.join(lowerCamelCase , F"""{split}.target""" ) , lowerCamelCase )
return tmp_dir
class __lowerCAmelCase ( _UpperCamelCase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def _snake_case ( self , snake_case ) -> Dict:
"""simple docstring"""
a__ : Optional[int] = AutoTokenizer.from_pretrained(snake_case )
a__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
a__ : int = max(len(tokenizer.encode(snake_case ) ) for a in ARTICLES )
a__ : List[Any] = max(len(tokenizer.encode(snake_case ) ) for a in SUMMARIES )
a__ : Tuple = 4
a__ : Optional[Any] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
a__ , a__ : Union[str, Any] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
a__ : Tuple = SeqaSeqDataset(
snake_case , data_dir=snake_case , type_path="train" , max_source_length=snake_case , max_target_length=snake_case , src_lang=snake_case , tgt_lang=snake_case , )
a__ : int = DataLoader(snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case , snake_case )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
a__ : str = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _snake_case ( self , snake_case ) -> List[str]:
"""simple docstring"""
a__ : Optional[Any] = AutoTokenizer.from_pretrained(snake_case )
a__ : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
a__ : List[Any] = max(len(tokenizer.encode(snake_case ) ) for a in ARTICLES )
a__ : List[str] = max(len(tokenizer.encode(snake_case ) ) for a in SUMMARIES )
a__ : Optional[int] = 4
a__ : Union[str, Any] = LegacySeqaSeqDataset(
snake_case , data_dir=snake_case , type_path="train" , max_source_length=20 , max_target_length=snake_case , )
a__ : Union[str, Any] = DataLoader(snake_case , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _snake_case ( self ) -> Dict:
"""simple docstring"""
a__ : List[str] = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
a__ : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
a__ : Tuple = tmp_dir.joinpath("train.source" ).open().readlines()
a__ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(snake_case , snake_case , 128 , snake_case )
a__ : str = {x.name for x in tmp_dir.iterdir()}
a__ : Dict = {x.name for x in save_dir.iterdir()}
a__ : Dict = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(snake_case ) < len(snake_case )
assert len(snake_case ) == 1
assert len(packed_examples[0] ) == sum(len(snake_case ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def _snake_case ( self ) -> Any:
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
a__ , a__ , a__ : Optional[int] = self._get_dataset(max_len=64 )
a__ : List[Any] = 64
a__ : Optional[int] = ds.make_dynamic_sampler(snake_case , required_batch_size_multiple=snake_case )
a__ : Union[str, Any] = [len(snake_case ) for x in batch_sampler]
assert len(set(snake_case ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(snake_case ) == len(snake_case ) # no dropped or added examples
a__ : Tuple = DataLoader(snake_case , batch_sampler=snake_case , collate_fn=ds.collate_fn , num_workers=2 )
a__ : List[Any] = []
a__ : int = []
for batch in data_loader:
a__ : Tuple = batch["input_ids"].shape
a__ : Tuple = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
a__ : Any = np.product(batch["input_ids"].shape )
num_src_per_batch.append(snake_case )
if num_src_tokens > (max_tokens * 1.1):
failures.append(snake_case )
assert num_src_per_batch[0] == max(snake_case )
if failures:
raise AssertionError(F"""too many tokens in {len(snake_case )} batches""" )
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , a__ , a__ : Any = self._get_dataset(max_len=512 )
a__ : Dict = 2
a__ : Optional[int] = ds.make_sortish_sampler(snake_case , shuffle=snake_case )
a__ : int = DataLoader(snake_case , batch_size=snake_case , collate_fn=ds.collate_fn , num_workers=2 )
a__ : Optional[Any] = DataLoader(snake_case , batch_size=snake_case , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case )
a__ : Optional[Any] = tokenizer.pad_token_id
def count_pad_tokens(snake_case , snake_case="input_ids" ):
return [batch[k].eq(snake_case ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(snake_case , k="labels" ) ) < sum(count_pad_tokens(snake_case , k="labels" ) )
assert sum(count_pad_tokens(snake_case ) ) < sum(count_pad_tokens(snake_case ) )
assert len(snake_case ) == len(snake_case )
def _snake_case ( self , snake_case=1_000 , snake_case=128 ) -> Tuple:
"""simple docstring"""
if os.getenv("USE_REAL_DATA" , snake_case ):
a__ : Tuple = "examples/seq2seq/wmt_en_ro"
a__ : Tuple = max_len * 2 * 64
if not Path(snake_case ).joinpath("train.len" ).exists():
save_len_file(snake_case , snake_case )
else:
a__ : str = "examples/seq2seq/test_data/wmt_en_ro"
a__ : Dict = max_len * 4
save_len_file(snake_case , snake_case )
a__ : str = AutoTokenizer.from_pretrained(snake_case )
a__ : List[Any] = SeqaSeqDataset(
snake_case , data_dir=snake_case , type_path="train" , max_source_length=snake_case , max_target_length=snake_case , n_obs=snake_case , )
return ds, max_tokens, tokenizer
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ , a__ , a__ : str = self._get_dataset()
a__ : Union[str, Any] = set(DistributedSortishSampler(snake_case , 256 , num_replicas=2 , rank=0 , add_extra_examples=snake_case ) )
a__ : int = set(DistributedSortishSampler(snake_case , 256 , num_replicas=2 , rank=1 , add_extra_examples=snake_case ) )
assert idsa.intersection(snake_case ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def _snake_case ( self , snake_case ) -> Any:
"""simple docstring"""
a__ : int = AutoTokenizer.from_pretrained(snake_case , use_fast=snake_case )
if tok_name == MBART_TINY:
a__ : Optional[Any] = SeqaSeqDataset(
snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
a__ : str = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
a__ : Tuple = SeqaSeqDataset(
snake_case , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
a__ : Any = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(snake_case ) == 1 if tok_name == BART_TINY else len(snake_case ) == 0
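# A minimal sketch of the "sortish" sampling idea these tests exercise
# (assumption: simplified, no distributed logic): shuffle indices, then sort
# by length inside large buckets so each batch pads to a similar length.
import random

def sortish_indices(lengths, batch_size, bucket_mult=50):
    idxs = list(range(len(lengths)))
    random.shuffle(idxs)
    bucket = batch_size * bucket_mult
    out = []
    for i in range(0, len(idxs), bucket):
        chunk = idxs[i : i + bucket]
        out.extend(sorted(chunk, key=lambda j: lengths[j], reverse=True))
    return out

print(sortish_indices([5, 1, 9, 3, 7, 2], batch_size=2, bucket_mult=2))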
| 629 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE__ : List[str] = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
a__ : Tuple = list(s_dict.keys() )
for key in keys:
a__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F"""{key} -> {new_key}""" )
a__ : Dict = s_dict.pop(lowerCamelCase )
return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( lowerCamelCase , lowerCamelCase ):
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
a__ : Optional[Any] = os.path.basename(lowerCamelCase )
a__ : List[Any] = url.split("/" )[-2]
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowerCamelCase ):
a__ : Any = open(lowerCamelCase , "rb" ).read()
if hashlib.sha256(lowerCamelCase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop:
while True:
a__ : Optional[Any] = source.read(8192 )
if not buffer:
break
output.write(lowerCamelCase )
loop.update(len(lowerCamelCase ) )
a__ : Optional[int] = open(lowerCamelCase , "rb" ).read()
if hashlib.sha256(lowerCamelCase ).hexdigest() != expected_sha256:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
return model_bytes
def _A ( lowerCamelCase , lowerCamelCase ):
if ".pt" not in checkpoint_path:
a__ : str = _download(_MODELS[checkpoint_path] )
else:
a__ : str = torch.load(lowerCamelCase , map_location="cpu" )
a__ : Dict = original_checkpoint["dims"]
a__ : Optional[int] = original_checkpoint["model_state_dict"]
a__ : Any = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(lowerCamelCase )
rename_keys(lowerCamelCase )
a__ : Optional[Any] = True
a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a__ : Tuple = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase )
a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a__ : str = proj_out_weights
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
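# Tiny standalone illustration of the rename pass above (only a subset of the
# mapping, for demonstration): every state-dict key is rewritten by substring
# replacement against the mapping table.
MAPPING_SUBSET = {"blocks": "layers", "mlp.0": "fc1", "mlp_ln": "final_layer_norm"}

def rename_demo(sd):
    for key in list(sd.keys()):
        new_key = key
        for k, v in MAPPING_SUBSET.items():
            if k in new_key:
                new_key = new_key.replace(k, v)
        sd[new_key] = sd.pop(key)
    return sd

print(rename_demo({"encoder.blocks.0.mlp.0.weight": 1}))
# -> {'encoder.layers.0.fc1.weight': 1}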
| 629 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
SCREAMING_SNAKE_CASE__ : List[Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _A ( lowerCamelCase ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowerCamelCase )
def _A ( lowerCamelCase ):
from transformers.testing_utils import pytest_terminal_summary_main
a__ : int = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(lowerCamelCase , id=lowerCamelCase )
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Optional[Any] = """informer"""
_UpperCamelCase : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = prediction_length
a__ : Optional[int] = context_length or prediction_length
a__ : Optional[int] = distribution_output
a__ : str = loss
a__ : Optional[Any] = input_size
a__ : int = num_time_features
a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
a__ : Optional[int] = scaling
a__ : List[str] = num_dynamic_real_features
a__ : Optional[int] = num_static_real_features
a__ : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
a__ : List[Any] = cardinality
else:
a__ : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
a__ : Tuple = embedding_dimension
else:
a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
a__ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
a__ : Union[str, Any] = d_model
a__ : Any = encoder_attention_heads
a__ : Optional[Any] = decoder_attention_heads
a__ : int = encoder_ffn_dim
a__ : List[Any] = decoder_ffn_dim
a__ : List[str] = encoder_layers
a__ : Any = decoder_layers
a__ : List[str] = dropout
a__ : int = attention_dropout
a__ : List[Any] = activation_dropout
a__ : Optional[int] = encoder_layerdrop
a__ : Tuple = decoder_layerdrop
a__ : Any = activation_function
a__ : Tuple = init_std
a__ : Optional[int] = use_cache
# Informer
a__ : Union[str, Any] = attention_type
a__ : List[str] = sampling_factor
a__ : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
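# The default `embedding_dimension` rule used in __init__ above, shown
# standalone: half of (cardinality + 1), capped at 50.
cardinality = [3, 10, 1_000]
print([min(50, (cat + 1) // 2) for cat in cardinality])  # [2, 5, 50]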
| 629 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : int = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
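# Generic sketch of the lazy-import pattern `_LazyModule` implements (a
# simplified stand-in, not the real transformers implementation): resolve a
# name to its submodule only when the attribute is first accessed.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Called only when normal attribute lookup fails.
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy = TinyLazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # json is imported only at this call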
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(snake_case )
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _snake_case ( self ) -> int:
"""simple docstring"""
with self.assertRaises(snake_case ):
create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
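# A framework-free sketch of the layer-copy rule (assumption: simplified; the
# real make_student uses a lookup table of layer ids): take d evenly spaced
# teacher layers for a d-layer student.
def pick_layers_to_copy(n_teacher_layers, n_student_layers):
    step = n_teacher_layers / n_student_layers
    return [round(i * step) for i in range(n_student_layers)]

print(pick_layers_to_copy(12, 3))  # [0, 4, 8]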
| 629 | 1 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Any = Node(lowerCamelCase )
a__ , a__ : List[str] = split(lowerCamelCase , lowerCamelCase )
return merge(merge(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
a__ , a__ : Optional[int] = split(lowerCamelCase , value - 1 )
a__ , a__ : Tuple = split(lowerCamelCase , lowerCamelCase )
return merge(lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
for arg in args.split():
if arg[0] == "+":
a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def _A ( ):
a__ : List[str] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
a__ : int = input()
while args != "q":
a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
print(lowerCamelCase )
a__ : Optional[Any] = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
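# Usage sketch for the treap above (assumption: the helpers keep their
# original names insert, erase and inorder rather than the collapsed `_A`):
root = None
for v in [5, 3, 8, 3]:
    root = insert(root, v)
inorder(root)          # prints 3,3,5,8,
root = erase(root, 3)  # removes every node with value 3
print()
inorder(root)          # prints 5,8,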
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
"""simple docstring"""
super().__init__()
a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : int = StableDiffusionPipeline(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _snake_case ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
self.to(snake_case )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
a__ : Any = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.2
a__ : List[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.3
a__ : Optional[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.4
a__ : Dict = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 629 | 1 |
SCREAMING_SNAKE_CASE__ : int = range(2, 2_0 + 1)
SCREAMING_SNAKE_CASE__ : Optional[int] = [1_0**k for k in range(ks[-1] + 1)]
SCREAMING_SNAKE_CASE__ : dict[int, dict[int, list[list[int]]]] = {}
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : List[str] = sum(a_i[j] for j in range(lowerCamelCase , len(lowerCamelCase ) ) )
a__ : Any = sum(a_i[j] * base[j] for j in range(min(len(lowerCamelCase ) , lowerCamelCase ) ) )
a__ , a__ : List[str] = 0, 0
a__ : str = n - i
a__ : Dict = memo.get(lowerCamelCase )
if sub_memo is not None:
a__ : Union[str, Any] = sub_memo.get(lowerCamelCase )
if jumps is not None and len(lowerCamelCase ) > 0:
# find and make the largest jump without going over
a__ : Optional[Any] = -1
for _k in range(len(lowerCamelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
a__ : Dict = _k
break
if max_jump >= 0:
a__ , a__ , a__ : Dict = jumps[max_jump]
# since the difference between jumps is cached, add c
a__ : Any = diff + c
for j in range(min(lowerCamelCase , len(lowerCamelCase ) ) ):
a__ , a__ : Tuple = divmod(lowerCamelCase , 10 )
if new_c > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
a__ : List[Any] = []
else:
a__ : Optional[int] = {c: []}
a__ : Union[str, Any] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
a__ , a__ : List[str] = next_term(lowerCamelCase , k - 1 , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
a__ , a__ : Dict = compute(lowerCamelCase , lowerCamelCase , i + dn , lowerCamelCase )
diff += _diff
dn += terms_jumped
a__ : Tuple = sub_memo[c]
# keep jumps sorted by # of terms skipped
a__ : Tuple = 0
while j < len(lowerCamelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCamelCase , (diff, dn, k) )
return (diff, dn)
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if i >= n:
return 0, i
if k > len(lowerCamelCase ):
a_i.extend([0 for _ in range(k - len(lowerCamelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
a__ : str = i
a__ , a__ , a__ : int = 0, 0, 0
for j in range(len(lowerCamelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
a__ : int = ds_c + ds_b
diff += addend
a__ : Dict = 0
for j in range(lowerCamelCase ):
a__ : Tuple = a_i[j] + addend
a__ , a__ : Any = divmod(lowerCamelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return diff, i - start_i
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for j in range(lowerCamelCase , len(lowerCamelCase ) ):
a__ : int = digits[j] + addend
if s >= 10:
a__ , a__ : Optional[Any] = divmod(lowerCamelCase , 10 )
a__ : Optional[Any] = addend // 10 + quotient
else:
a__ : int = s
a__ : Optional[int] = addend // 10
if addend == 0:
break
while addend > 0:
a__ , a__ : Union[str, Any] = divmod(lowerCamelCase , 10 )
digits.append(lowerCamelCase )
def _A ( lowerCamelCase = 10**15 ):
a__ : str = [1]
a__ : str = 1
a__ : Dict = 0
while True:
a__ , a__ : Tuple = next_term(lowerCamelCase , 20 , i + dn , lowerCamelCase )
dn += terms_jumped
if dn == n - i:
break
a__ : Tuple = 0
for j in range(len(lowerCamelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f'{solution() = }')
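# Brute-force cross-check for small n (assumption: the sequence is
# a(n+1) = a(n) + digit_sum(a(n)) with a(1) = 1, as in Project Euler 551);
# useful to validate the jump-based solution above on small inputs.
def brute_force_a_n(n):
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

print(brute_force_a_n(10))  # 62; should agree with solution(10)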
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
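# Worked example of the formula above, fully self-contained (values assumed):
# buoyant force F = fluid_density * gravity * volume for 0.002 m^3 of water.
rho, g_std, vol = 997.0, 9.80665, 0.002
print(rho * g_std * vol)  # ≈ 19.55 N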
| 629 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""")
SCREAMING_SNAKE_CASE__ : Tuple = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
SCREAMING_SNAKE_CASE__ : int = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Optional[int] = CamembertTokenizer
_UpperCamelCase : Dict = CamembertTokenizerFast
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Union[str, Any] = True
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a__ : Any = CamembertTokenizer(snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Union[str, Any] = "<pad>"
a__ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(snake_case ) , 1_004 )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def _snake_case ( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Any = CamembertTokenizer(snake_case )
tokenizer.save_pretrained(self.tmpdirname )
a__ : str = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
a__ : List[Any] = "I was born in 92000, and this is falsé."
a__ : List[str] = tokenizer.encode(snake_case )
a__ : List[str] = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
a__ : Optional[int] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
a__ : Dict = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
a__ : Tuple = tokenizer.convert_ids_to_tokens(snake_case )
a__ : Union[str, Any] = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a__ : Tuple = self.get_tokenizer()
a__ : List[str] = self.get_rust_tokenizer()
a__ : Tuple = "I was born in 92000, and this is falsé."
a__ : Dict = tokenizer.tokenize(snake_case )
a__ : str = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
a__ : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
a__ : Any = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
a__ : Tuple = self.get_rust_tokenizer()
a__ : Any = tokenizer.encode(snake_case )
a__ : List[Any] = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ : List[str] = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# CamemBERT is a French model, so we also use French texts.
a__ : List[Any] = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=snake_case , )
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Any = Node(lowerCamelCase )
a__ , a__ : List[str] = split(lowerCamelCase , lowerCamelCase )
return merge(merge(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
a__ , a__ : Optional[int] = split(lowerCamelCase , value - 1 )
a__ , a__ : Tuple = split(lowerCamelCase , lowerCamelCase )
return merge(lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
for arg in args.split():
if arg[0] == "+":
a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def _A ( ):
a__ : List[str] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
a__ : int = input()
while args != "q":
a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
print(lowerCamelCase )
a__ : Optional[Any] = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 629 | 1 |
from __future__ import annotations
def _A ( lowerCamelCase ): # This function is recursive
a__ : Any = len(lowerCamelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ : List[str] = array[0]
a__ : List[str] = False
a__ : Optional[int] = 1
a__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ : Tuple = True
a__ : str = [element for element in array[i:] if element >= array[i]]
a__ : Dict = longest_subsequence(lowerCamelCase )
if len(lowerCamelCase ) > len(lowerCamelCase ):
a__ : List[Any] = temp_array
else:
i += 1
a__ : str = [element for element in array[1:] if element >= pivot]
a__ : Union[str, Any] = [pivot, *longest_subsequence(lowerCamelCase )]
if len(lowerCamelCase ) > len(lowerCamelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
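    # Added checks (not in the original module), traced by hand against the
    # recursion above: the function returns one longest non-decreasing run.
    assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]
    assert longest_subsequence([]) == []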
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self ) -> Dict:
"""simple docstring"""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=False , steps_offset=1 , )
torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
    def get_dummy_inputs(self , device , seed=0 ) -> Dict:
        """simple docstring"""
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
    def test_attention_slicing_forward_pass(self ) -> List[str]:
        """simple docstring"""
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical(self ) -> int:
        """simple docstring"""
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_unclip(self ) -> Tuple:
"""simple docstring"""
        expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe("anime turle" , generator=generator , output_type="np" )
        image = output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
        _ = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 629 | 1 |
import argparse
from t5x import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path , config_name , flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path )
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
a__ : Optional[Any] = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
a__ : Union[str, Any] = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a__ : Optional[Any] = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global']." )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = F"""layers_{str(layer_index )}"""
# Self-Attention
a__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
a__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
a__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
a__ : Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a__ : int = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
a__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
a__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
a__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
a__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
a__ : Any = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
a__ : Tuple = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index )]["layer"]
a__ : Union[str, Any] = tax_attention_key
a__ : Tuple = tax_attention_out
a__ : Tuple = tax_attention_query
a__ : List[Any] = tax_attention_value
a__ : List[Any] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a__ : List[Any] = tax_global_layer_norm
if split_mlp_wi:
a__ : Optional[int] = tax_mlp_wi_a
a__ : Tuple = tax_mlp_wi_a
else:
a__ : int = tax_mlp_wi
a__ : str = tax_mlp_wo
a__ : str = tax_mlp_layer_norm
        flax_model.params["encoder"]["block"][str(layer_index )]["layer"] = flax_model_encoder_layer_block
# Only for layer 0:
a__ : Optional[int] = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
a__ : Union[str, Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
a__ : str = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
a__ : List[Any] = tax_encoder_global_rel_embedding
# Assigning
a__ : int = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
a__ : List[Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = F"""layers_{str(layer_index )}"""
# Self-Attention
a__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
a__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
a__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
a__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
a__ : str = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
a__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
a__ : Optional[int] = tax_enc_dec_attention_module["key"]["kernel"]
a__ : str = tax_enc_dec_attention_module["out"]["kernel"]
a__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
a__ : Any = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
a__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
a__ : int = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
a__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
a__ : Dict = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
a__ : List[str] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
a__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index )]["layer"]
a__ : Optional[Any] = tax_attention_key
a__ : Optional[Any] = tax_attention_out
a__ : Any = tax_attention_query
a__ : List[str] = tax_attention_value
a__ : Optional[int] = tax_pre_attention_layer_norm
a__ : Union[str, Any] = tax_enc_dec_attention_key
a__ : int = tax_enc_dec_attention_out
a__ : List[str] = tax_enc_dec_attention_query
a__ : int = tax_enc_dec_attention_value
a__ : List[str] = tax_cross_layer_norm
if split_mlp_wi:
a__ : Union[str, Any] = tax_mlp_wi_a
a__ : int = tax_mlp_wi_a
else:
a__ : List[Any] = tax_mlp_wi
a__ : Union[str, Any] = tax_mlp_wo
a__ : int = txa_mlp_layer_norm
        flax_model.params["decoder"]["block"][str(layer_index )]["layer"] = flax_model_decoder_layer_block
# Decoder Normalization
a__ : Any = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
a__ : List[str] = txa_decoder_norm
# Only for layer 0:
a__ : Union[str, Any] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
a__ : str = tax_decoder_rel_embedding
# Token Embeddings
a__ : int = tax_model["target"]["token_embedder"]["embedding"]
a__ : Dict = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
a__ : int = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path )
    print("T5X Model was successfully converted!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
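
# Added example (the paths below are hypothetical placeholders): the converter
# can also be driven programmatically instead of through argparse, e.g.
#
#   convert_tax_checkpoint_to_flax(
#       "/path/to/t5x_checkpoint",    # T5X checkpoint directory
#       "google/long-t5-local-base",  # config name
#       "/path/to/flax_dump",         # output folder for the Flax model
#   )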
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
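    # Added note: with the lazy module in place, importing the package is
    # cheap; the heavy submodule import happens on first attribute access.
    # A sketch, assuming the usual transformers package layout:
    #
    #   from transformers.models import distilbert  # fast, nothing loaded yet
    #   config = distilbert.DistilBertConfig()      # triggers the real import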
| 629 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1E-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
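
# Added example (not in the original module): building a linear warmup/decay
# schedule for a toy optimizer with the factory above.
def _example_scheduler_usage() -> None:
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
    lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(1000):
        optimizer.step()      # update parameters first
        lr_scheduler.step()   # then advance the learning-rate schedule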
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int ):
    """Estimate pi by sampling random points in the unit square."""

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float , y: float ) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
        for _ in range(iterations ) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""" )
    print(F"""The numpy value of pi is {pi}""" )
    print(F"""The total error is {abs(pi - pi_estimate )}""" )


def area_under_curve_estimator(iterations: int , function_to_integrate: Callable[[float], float] , min_value: float = 0.0 , max_value: float = 1.0 , ) -> float:
    """Monte Carlo estimate of the integral of the function on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value , max_value ) ) for _ in range(iterations ) ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int , min_value: float = 0.0 , max_value: float = 1.0 ) -> None:
    def identity_function(x: float ) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations , identity_function , min_value , max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************" )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print("******************" )


def pi_estimator_using_area_under_curve(iterations: int ) -> None:
    def function_to_integrate(x: float ) -> float:
        return sqrt(4.0 - x * x )

    estimated_value = area_under_curve_estimator(
        iterations , function_to_integrate , 0.0 , 2.0 )
    print("******************" )
    print("Estimating pi using area_under_curve_estimator" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
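
# Added example (not in the original module): area_under_curve_estimator is a
# generic Monte Carlo integrator, so custom integrands work directly.
def _example_integral() -> float:
    from math import exp

    # Estimate the integral of e^x over [0, 1]; the exact value is e - 1.
    return area_under_curve_estimator(100_000, exp, 0.0, 1.0)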
| 629 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name , num_meta4D_last_stage ):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, _param = old_name.split("." )
        if layer == "0":
            new_name = old_name.replace("0" , "convolution1" )
        elif layer == "1":
            new_name = old_name.replace("1" , "batchnorm_before" )
        elif layer == "3":
            new_name = old_name.replace("3" , "convolution2" )
        else:
            new_name = old_name.replace("4" , "batchnorm_after" )
    if "network" in old_name and re.search(r"\d\.\d" , old_name ):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(r"\d\.\d\d." , old_name ).group()
        else:
            match = re.search(r"\d\.\d." , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , "" )
            trimmed_name = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match , "" )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1" , "layernorm1" )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2" , "layernorm2" )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1" , "linear_in" )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2" , "linear_out" )
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d." , old_name ):
        new_name = old_name.replace("network" , "intermediate_stages" )
    if "fc" in new_name:
        new_name = new_name.replace("fc" , "convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1" , "batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2" , "batchnorm_after" )
    if "proj" in new_name:
        new_name = new_name.replace("proj" , "projection" )
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head" , "distillation_classifier" )
    elif "head" in new_name:
        new_name = new_name.replace("head" , "classifier" )
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm" , "layernorm" )
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name
    return new_name


def convert_torch_checkpoint(checkpoint , num_meta4D_last_stage ):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val
    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image


def convert_efficientformer_checkpoint(checkpoint_path , efficientformer_config_file , pytorch_dump_path , push_to_hub ):
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
    pixel_values = processor(images=image , return_tensors="pt" ).pixel_values
# original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings["bicubic"] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values , pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(F"""Processor successfully saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=True , )
processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
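
# Added example (arguments are hypothetical placeholders): the converter can
# also be called programmatically, mirroring the CLI above, e.g.
#
#   convert_efficientformer_checkpoint(
#       checkpoint_path="efficientformer_l1_300d.pth",
#       efficientformer_config_file="configs/efficientformer_l1.json",
#       pytorch_dump_path="efficientformer-l1-300",
#       push_to_hub=False,
#   )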
| 629 | 1 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class )
        checkpoints = _re_checkpoint.findall(config_source )
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = F"""https://huggingface.co/{ckpt_name}"""
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = "\n".join(sorted(configs_without_checkpoint ) )
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
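
# Added example (not in the original script): what the checkpoint regex matches.
def _example_regex() -> None:
    docstring = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
    assert _re_checkpoint.findall(docstring) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]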
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
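
# Added usage sketch (downloads the checkpoint listed in the maps above):
#
#   tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   enc = tok("a picture of a cat", "what animal is shown?")
#   # enc["token_type_ids"] marks the first segment with 0s and the second
#   # with 1s, matching create_token_type_ids_from_sequences above.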
| 629 | 1 |
def solution(limit: int = 1_000_000 ) -> int:
    # phi[i] starts at i - 1; the sieve below turns it into Euler's totient.
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:  # i is prime, so divide the factor i out downstream
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
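    # Added check: with a small limit the sieve is easy to verify by hand.
    # phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) == 21, the number of
    # reduced proper fractions with denominator <= 8 (Project Euler 72).
    assert solution(8) == 21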
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig ):
    model_type = "mobilenet_v2"

    def __init__(self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self ) -> float:
        """simple docstring"""
        return 1E-4
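
# Added usage sketch (not in the original module):
#
#   config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#   # depth_multiplier scales every layer's channel count; the __init__
#   # above rejects values <= 0.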
| 629 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1_0_2_4,
"""facebook/bart-large""": 1_0_2_4,
"""facebook/bart-large-mnli""": 1_0_2_4,
"""facebook/bart-large-cnn""": 1_0_2_4,
"""facebook/bart-large-xsum""": 1_0_2_4,
"""yjernite/bart_eli5""": 1_0_2_4,
}
class BartTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token(self ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token(self , value ) -> None:
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus(self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
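
# Added usage sketch (downloads the checkpoint listed in the maps above):
#
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   single = tok("Hello world")   # <s> Hello world </s>
#   pair = tok("Hello", "world")  # <s> Hello </s></s> world </s>
#   # the doubled </s> separator matches build_inputs_with_special_tokens above.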
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree ) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError("Not supported" )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx , dims ) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
@torch.jit.ignore
def _get_minimal_slice_set(start , end , dims , start_edges = None , end_edges = None , ) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
    # Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list )
    divergence_idx = len(path_list )
    # start == end, and we're done
    if divergence_idx == len(start ):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
        slices.extend(lower() )
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper() )
        slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
        slices.extend(lower() )
    return slices
@torch.jit.ignore
def _chunk_slice(t , flat_start , flat_end , no_batch_dims ) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
if not (len(lowerCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
a__ : str = None
if _out is not None:
a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
a__ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
a__ : str = 0
a__ : Any = prepped_outputs
for _ in range(lowerCamelCase ):
# Chunk the input
if not low_mem:
a__ : str = _select_chunk
else:
a__ : Tuple = partial(
_chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
# Run the layer on the chunk
a__ : Any = layer(**lowerCamelCase )
# Allocate space for the output
if out is None:
a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase , lowerCamelCase ):
def assign(lowerCamelCase , lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
assign(lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
a__ : Dict = da[k]
assign(lowerCamelCase , lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
a__ : Dict = xa
elif isinstance(lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
a__ : Dict = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
    a__ : Any = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches( self , ac , ac_prev ) -> bool:
        """simple docstring"""
        consistent = True
        for a, a_prev in zip(ac , ac_prev ):
            assert type(a ) == type(a_prev )
            if isinstance(a , (list, tuple) ):
                consistent &= self._compare_arg_caches(a , a_prev )
            elif isinstance(a , dict ):
                a_items = [v for _, v in sorted(a.items() , key=lambda x : x[0] )]
                a_prev_items = [v for _, v in sorted(a_prev.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(a_items , a_prev_items )
            else:
                consistent &= a == a_prev
        return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
        a__ : tuple = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
            # No cached data yet, so we have to tune from scratch
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 629 | 1 |
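The block above chunks every input along a flattened batch dimension, runs the layer piece by piece, and writes each piece into a pre-allocated output, trading graph width for peak memory. A minimal, self-contained sketch of the same idea; the helper name chunk_apply and the example layer are illustrative, not part of the code above:

import torch

def chunk_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Flatten all leading (batch-like) dimensions into one axis.
    batch_shape = x.shape[:-1]
    flat = x.reshape(-1, x.shape[-1])
    out = None
    for i in range(0, flat.shape[0], chunk_size):
        chunk_out = fn(flat[i : i + chunk_size])
        if out is None:
            # Allocate the full output lazily, once the output width is known.
            out = chunk_out.new_zeros((flat.shape[0],) + chunk_out.shape[1:])
        out[i : i + chunk_size] = chunk_out
    return out.reshape(batch_shape + out.shape[1:])

x = torch.randn(4, 8, 16)
layer = torch.nn.Linear(16, 32)
with torch.no_grad():
    y = chunk_apply(layer, x, chunk_size=5)
assert y.shape == (4, 8, 32)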
from __future__ import annotations
from math import gcd
def pollard_rho( num , seed = 2 , step = 1 , attempts = 3 , ):
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value , step , modulus ) -> int:
        return (pow(value , 2 ) + step) % modulus
    for _ in range(attempts ):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""num""",
type=int,
help="""The value to find a divisor of""",
)
parser.add_argument(
"""--attempts""",
type=int,
default=3,
help="""The number of attempts before giving up""",
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[int] = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
| 629 |
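A quick sanity check for pollard_rho above on a small semiprime; which factor comes back depends on the seed, so only divisibility is asserted:

composite = 91  # 7 * 13
factor = pollard_rho(composite)
assert factor is not None and composite % factor == 0
print(f"{composite} = {factor} * {composite // factor}")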
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : int = """upernet"""
def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**snake_case )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(snake_case , snake_case ):
a__ : Optional[int] = backbone_config.get("model_type" )
a__ : str = CONFIG_MAPPING[backbone_model_type]
a__ : str = config_class.from_dict(snake_case )
a__ : int = backbone_config
a__ : Optional[Any] = hidden_size
a__ : Optional[Any] = initializer_range
a__ : Tuple = pool_scales
a__ : Optional[Any] = use_auxiliary_head
a__ : Optional[Any] = auxiliary_loss_weight
a__ : Dict = auxiliary_in_channels
a__ : Optional[int] = auxiliary_channels
a__ : Any = auxiliary_num_convs
a__ : Any = auxiliary_concat_input
a__ : int = loss_ignore_index
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Tuple = copy.deepcopy(self.__dict__ )
a__ : Optional[Any] = self.backbone_config.to_dict()
a__ : List[Any] = self.__class__.model_type
return output
| 629 | 1 |
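For context, this config nests a backbone config inside the segmentation-head config. A short usage sketch, assuming the class above is exported as transformers' UperNetConfig with the standard PretrainedConfig method names:

from transformers import UperNetConfig

config = UperNetConfig(hidden_size=512, pool_scales=[1, 2, 3, 6])
# With backbone_config=None, a default ResNet backbone config is created.
print(config.backbone_config.model_type)  # -> "resnet"
serialized = config.to_dict()
assert serialized["model_type"] == "upernet"
# The nested backbone config is serialized as a plain dict as well.
assert isinstance(serialized["backbone_config"], dict)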
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger()
def _A ( ):
a__ : int = argparse.ArgumentParser()
parser.add_argument("-f" )
a__ : Dict = parser.parse_args()
return args.f
class __lowerCAmelCase ( _UpperCamelCase ):
def _snake_case ( self ) -> None:
"""simple docstring"""
a__ : Optional[int] = logging.StreamHandler(sys.stdout )
logger.addHandler(snake_case )
def _snake_case ( self , snake_case ) -> Any:
"""simple docstring"""
a__ : Union[str, Any] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(snake_case , "argv" , snake_case ):
a__ : List[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(snake_case , 0.666 )
@slow
@require_torch_non_multi_gpu
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Any = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(snake_case )
a__ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(snake_case )
a__ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(snake_case )
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
SCREAMING_SNAKE_CASE__ : int = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE__ : Dict = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a__ : int = numpy_to_pil(lowerCamelCase )
return images
def _A ( lowerCamelCase ):
if images.ndim == 3:
a__ : Tuple = images[None, ...]
a__ : Dict = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
a__ : str = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
a__ : List[Any] = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
| 629 | 1 |
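The two helpers above are the standard diffusers post-processing pair: denormalize from [-1, 1] to [0, 1], move channels last, and convert to uint8 PIL images. The same steps inlined as a standalone sketch, without relying on the renamed functions above:

import torch
from PIL import Image

# Fake a batch of two 8x8 RGB images in [-1, 1], as a diffusion model would emit.
images = torch.rand(2, 3, 8, 8) * 2 - 1
arr = (images / 2 + 0.5).clamp(0, 1).cpu().permute(0, 2, 3, 1).float().numpy()
pil_images = [Image.fromarray((img * 255).round().astype("uint8")) for img in arr]
assert len(pil_images) == 2 and pil_images[0].size == (8, 8)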
import copy
import random
from transformers import CLIPTokenizer
class __lowerCAmelCase ( _UpperCamelCase ):
def __init__( self , *snake_case , **snake_case ) -> Optional[Any]:
"""simple docstring"""
super().__init__(*snake_case , **snake_case )
a__ : Optional[Any] = {}
def _snake_case ( self , snake_case , *snake_case , **snake_case ) -> str:
"""simple docstring"""
a__ : List[Any] = super().add_tokens(snake_case , *snake_case , **snake_case )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
" `placeholder_token` that is not already in the tokenizer." )
def _snake_case ( self , snake_case , *snake_case , snake_case=1 , **snake_case ) -> str:
"""simple docstring"""
a__ : List[str] = []
if num_vec_per_token == 1:
self.try_adding_tokens(snake_case , *snake_case , **snake_case )
output.append(snake_case )
else:
a__ : int = []
for i in range(snake_case ):
a__ : List[Any] = placeholder_token + F"""_{i}"""
self.try_adding_tokens(snake_case , *snake_case , **snake_case )
output.append(snake_case )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
                    F""" {placeholder_token}. Keep placeholder tokens independent.""" )
a__ : Any = output
def _snake_case ( self , snake_case , snake_case=False , snake_case=1.0 ) -> Dict:
"""simple docstring"""
if isinstance(snake_case , snake_case ):
a__ : Any = []
for i in range(len(snake_case ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=snake_case ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a__ : Tuple = self.token_map[placeholder_token]
a__ : Optional[Any] = tokens[: 1 + int(len(snake_case ) * prop_tokens_to_load )]
if vector_shuffle:
a__ : Dict = copy.copy(snake_case )
random.shuffle(snake_case )
a__ : Optional[int] = text.replace(snake_case , " ".join(snake_case ) )
return text
def __call__( self , snake_case , *snake_case , snake_case=False , snake_case=1.0 , **snake_case ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
snake_case , vector_shuffle=snake_case , prop_tokens_to_load=snake_case ) , *snake_case , **snake_case , )
def _snake_case ( self , snake_case , *snake_case , snake_case=False , snake_case=1.0 , **snake_case ) -> Tuple:
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
snake_case , vector_shuffle=snake_case , prop_tokens_to_load=snake_case ) , *snake_case , **snake_case , )
| 629 |
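The tokenizer above implements multi-vector textual inversion: a single placeholder word is registered as several sub-tokens and expanded before encoding. The expansion step in isolation; token_map here is a stand-in for the class attribute above:

import random

token_map = {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]}

def expand_placeholders(text: str, vector_shuffle: bool = False) -> str:
    for placeholder, tokens in token_map.items():
        if placeholder in text:
            tokens = list(tokens)
            if vector_shuffle:
                random.shuffle(tokens)
            text = text.replace(placeholder, " ".join(tokens))
    return text

print(expand_placeholders("a photo of <cat-toy>"))
# -> "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2"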
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
a__ : List[str] = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
a__ : Dict = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
return name.lower()
def _A ( lowerCamelCase ):
a__ : Tuple = _single_underscore_re.split(lowerCamelCase )
a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" )
def _A ( lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCamelCase ):
        raise ValueError(F"""Split name should match '{_split_re}' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCamelCase )}-{split}"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
a__ : Union[str, Any] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
a__ : Any = os.path.join(lowerCamelCase , lowerCamelCase )
return F"""{filepath}*"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
a__ : List[str] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if shard_lengths:
a__ : List[str] = len(lowerCamelCase )
a__ : str = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase )]
if filetype_suffix:
a__ : Optional[Any] = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
a__ : Optional[int] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
| 629 | 1 |
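Assuming the helpers above keep their names from the datasets library (camelcase_to_snakecase, filename_prefix_for_split, filename_for_dataset_split), the case conversion is easy to pin down with a standalone re-implementation:

import re

def camelcase_to_snakecase(name: str) -> str:
    # Same two regex passes as above: split ACRONYMWord and wordWord boundaries.
    name = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", name)
    name = re.sub(r"([a-z\d])([A-Z])", r"\1_\2", name)
    return name.lower()

assert camelcase_to_snakecase("SquadV2Dataset") == "squad_v2_dataset"
assert camelcase_to_snakecase("XMLParser") == "xml_parser"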
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : int = """upernet"""
def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**snake_case )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(snake_case , snake_case ):
a__ : Optional[int] = backbone_config.get("model_type" )
a__ : str = CONFIG_MAPPING[backbone_model_type]
a__ : str = config_class.from_dict(snake_case )
a__ : int = backbone_config
a__ : Optional[Any] = hidden_size
a__ : Optional[Any] = initializer_range
a__ : Tuple = pool_scales
a__ : Optional[Any] = use_auxiliary_head
a__ : Optional[Any] = auxiliary_loss_weight
a__ : Dict = auxiliary_in_channels
a__ : Optional[int] = auxiliary_channels
a__ : Any = auxiliary_num_convs
a__ : Any = auxiliary_concat_input
a__ : int = loss_ignore_index
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Tuple = copy.deepcopy(self.__dict__ )
a__ : Optional[Any] = self.backbone_config.to_dict()
a__ : List[Any] = self.__class__.model_type
return output
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
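The _LazyModule pattern above defers the heavy submodule imports until an attribute is first accessed. Module-level __getattr__ (PEP 562) achieves the same effect in a few lines; a generic sketch, not transformers internals:

# mypkg/__init__.py -- minimal lazy-import sketch using PEP 562.
import importlib

_import_structure = {"configuration_sew": ["SEWConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        submodule = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")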
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
_UpperCamelCase ,r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" ,)
class __lowerCAmelCase ( _UpperCamelCase ):
def _snake_case ( self , snake_case ) -> np.ndarray:
"""simple docstring"""
if self.framework == "tf":
a__ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
a__ : Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case )
else:
raise ValueError("Unsupported framework" )
return masked_index
def _snake_case ( self , snake_case ) -> np.ndarray:
"""simple docstring"""
a__ : Dict = self.get_masked_index(snake_case )
a__ : Tuple = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def _snake_case ( self , snake_case ) -> int:
"""simple docstring"""
if isinstance(snake_case , snake_case ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["input_ids"][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(snake_case )
def _snake_case ( self , snake_case , snake_case=None , **snake_case ) -> Dict[str, GenericTensor]:
"""simple docstring"""
if return_tensors is None:
a__ : List[str] = self.framework
a__ : Tuple = self.tokenizer(snake_case , return_tensors=snake_case )
self.ensure_exactly_one_mask_token(snake_case )
return model_inputs
def _snake_case ( self , snake_case ) -> List[str]:
"""simple docstring"""
a__ : List[str] = self.model(**snake_case )
a__ : Dict = model_inputs["input_ids"]
return model_outputs
def _snake_case ( self , snake_case , snake_case=5 , snake_case=None ) -> List[Any]:
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
a__ : Any = target_ids.shape[0]
a__ : Tuple = model_outputs["input_ids"][0]
a__ : Tuple = model_outputs["logits"]
if self.framework == "tf":
a__ : Union[str, Any] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
a__ : Dict = outputs.numpy()
a__ : str = outputs[0, masked_index, :]
a__ : List[Any] = stable_softmax(snake_case , axis=-1 )
if target_ids is not None:
a__ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case , 0 ) , target_ids.reshape(-1 , 1 ) )
a__ : List[Any] = tf.expand_dims(snake_case , 0 )
a__ : Optional[int] = tf.math.top_k(snake_case , k=snake_case )
a__ , a__ : Tuple = topk.values.numpy(), topk.indices.numpy()
else:
a__ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
a__ : Tuple = outputs[0, masked_index, :]
a__ : Optional[int] = logits.softmax(dim=-1 )
if target_ids is not None:
a__ : Tuple = probs[..., target_ids]
a__ , a__ : Tuple = probs.topk(snake_case )
a__ : Optional[int] = []
a__ : Union[str, Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
a__ : Union[str, Any] = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
a__ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
a__ : List[str] = target_ids[p].tolist()
a__ : Dict = p
# Filter padding out:
a__ : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
a__ : Union[str, Any] = self.tokenizer.decode(snake_case , skip_special_tokens=snake_case )
a__ : List[Any] = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(snake_case )
result.append(snake_case )
if single_mask:
return result[0]
return result
def _snake_case ( self , snake_case , snake_case=None ) -> Dict:
"""simple docstring"""
if isinstance(snake_case , snake_case ):
a__ : Optional[int] = [targets]
try:
a__ : Dict = self.tokenizer.get_vocab()
except Exception:
a__ : Dict = {}
a__ : List[Any] = []
for target in targets:
a__ : List[str] = vocab.get(snake_case , snake_case )
if id_ is None:
a__ : List[Any] = self.tokenizer(
snake_case , add_special_tokens=snake_case , return_attention_mask=snake_case , return_token_type_ids=snake_case , max_length=1 , truncation=snake_case , )["input_ids"]
if len(snake_case ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
"We cannot replace it with anything meaningful, ignoring it" )
continue
a__ : List[Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
a__ : Tuple = list(set(snake_case ) )
if len(snake_case ) == 0:
raise ValueError("At least one target must be provided when passed." )
a__ : Dict = np.array(snake_case )
return target_ids
def _snake_case ( self , snake_case=None , snake_case=None ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = {}
if targets is not None:
a__ : Tuple = self.get_target_ids(snake_case , snake_case )
a__ : Optional[int] = target_ids
if top_k is not None:
a__ : Dict = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." )
return {}, {}, postprocess_params
def __call__( self , snake_case , *snake_case , **snake_case ) -> Optional[Any]:
"""simple docstring"""
a__ : List[Any] = super().__call__(snake_case , **snake_case )
if isinstance(snake_case , snake_case ) and len(snake_case ) == 1:
return outputs[0]
return outputs
| 629 |
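End to end, the class above is what backs the fill-mask task in transformers; a typical invocation looks like this (the model choice is illustrative):

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
for pred in fill_mask("The capital of France is [MASK].", top_k=2):
    print(f"{pred['token_str']!r}: {pred['score']:.3f}")
# Restrict scoring to a fixed candidate set, as handled by get_target_ids above.
print(fill_mask("The capital of France is [MASK].", targets=["paris", "london"]))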
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : int = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=_UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = ["""keras_nlp"""]
def __init__( self , *snake_case , **snake_case ) -> str:
"""simple docstring"""
requires_backends(self , ["keras_nlp"] )
| 629 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Optional[int] = load_tool("text-question-answering" )
self.tool.setup()
a__ : Dict = load_tool("text-question-answering" , remote=snake_case )
def _snake_case ( self ) -> Dict:
"""simple docstring"""
a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
| 629 | 1 |
def merge_sort(collection ):
    start, end = [], []
    while len(collection ) > 1:
        min_value, max_value = min(collection ), max(collection )
        start.append(min_value )
        end.append(max_value )
        collection.remove(min_value )
        collection.remove(max_value )
end.reverse()
return start + collection + end
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = input("""Enter numbers separated by a comma:\n""").strip()
SCREAMING_SNAKE_CASE__ : Tuple = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 629 |
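Despite its name, merge_sort above is a min/max selection sort; a short trace makes the pass structure concrete:

# Each pass peels off the current minimum and maximum:
#   [5, 1, 4, 2, 3] -> start=[1],    end=[5],    rest=[4, 2, 3]
#   [4, 2, 3]       -> start=[1, 2], end=[5, 4], rest=[3]
# end is reversed at the close, so the result is [1, 2] + [3] + [4, 5].
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]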
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase ( _UpperCamelCase ):
@require_torch
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__ : Tuple = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(snake_case )
BertModel.from_pretrained(snake_case )
BertTokenizer.from_pretrained(snake_case )
pipeline(task="fill-mask" , model=snake_case )
# baseline - just load from_pretrained with normal network
a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__ : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Tuple = "1"
a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(snake_case )
BertModel.from_pretrained(snake_case )
BertTokenizer.from_pretrained(snake_case )
pipeline(task="fill-mask" , model=snake_case )
# baseline - just load from_pretrained with normal network
a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
a__ : Any = self.get_env()
a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__ : str = self.get_env()
a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Union[str, Any] = "1"
a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
a__ : List[str] = self.get_env()
a__ : Union[str, Any] = "1"
a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Any = "\nfrom transformers import AutoModel\n "
a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
a__ : Optional[Any] = self.get_env()
a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ : Dict = "1"
a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
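The environment toggle these tests exercise can be used directly: with a warm cache, the same from_pretrained calls resolve without any network access.

import os

# Must be set before transformers is imported.
os.environ["TRANSFORMERS_OFFLINE"] = "1"

from transformers import BertConfig

# Served from the local cache only; raises if the files were never downloaded.
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
print(config.model_type)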
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 629 | 1 |
from __future__ import annotations
class XORCipher:
    def __init__( self , key = 0 ) -> None:
        """simple docstring"""
        self.__key = key
    def encrypt( self , content , key ) -> list[str]:
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def decrypt( self , content , key ) -> list[str]:
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]
    def encrypt_string( self , content , key = 0 ) -> str:
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def decrypt_string( self , content , key = 0 ) -> str:
        """simple docstring"""
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans
    def encrypt_file( self , file , key = 0 ) -> bool:
        """simple docstring"""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("encrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True
    def decrypt_file( self , file , key ) -> bool:
        """simple docstring"""
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 629 |
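A live round trip through the class above; XOR with the same key is its own inverse, so encryption and decryption are the same operation:

crypt = XORCipher()
key = 67
ciphertext = crypt.encrypt_string("hallo welt", key)
assert crypt.decrypt_string(ciphertext, key) == "hallo welt"
# The list-based variants work character by character.
assert "".join(crypt.decrypt(crypt.encrypt("hallo welt", key), key)) == "hallo welt"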
from PIL import Image
def change_brightness(img , level ):
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 1_0_0)
        bright_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
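Since brightness(c) simplifies to c + level, Image.point is just a per-channel add, with PIL clipping the result to the 8-bit range; the arithmetic in isolation:

# brightness(c) = 128 + level + (c - 128) == c + level;
# PIL clips point() results to 0..255 for 8-bit modes, mirrored here explicitly.
level = 100
for c in (0, 100, 250):
    print(c, "->", max(0, min(255, c + level)))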
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ : Tuple = 2_5_6
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : str = ["""melgan"""]
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
a__ : Dict = math.log(1E-5 ) # Matches MelGAN training.
a__ : Any = 4.0 # Largest value for most examples
a__ : int = 128
self.register_modules(
notes_encoder=snake_case , continuous_encoder=snake_case , decoder=snake_case , scheduler=snake_case , melgan=snake_case , )
def _snake_case ( self , snake_case , snake_case=(-1.0, 1.0) , snake_case=False ) -> int:
"""simple docstring"""
a__ , a__ : Any = output_range
if clip:
a__ : Tuple = torch.clip(snake_case , self.min_value , self.max_value )
# Scale to [0, 1].
a__ : Optional[Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self , snake_case , snake_case=(-1.0, 1.0) , snake_case=False ) -> Optional[int]:
"""simple docstring"""
a__ , a__ : str = input_range
a__ : Tuple = torch.clip(snake_case , snake_case , snake_case ) if clip else outputs
# Scale to [0, 1].
a__ : str = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self , snake_case , snake_case , snake_case ) -> str:
"""simple docstring"""
a__ : str = input_tokens > 0
a__ , a__ : Dict = self.notes_encoder(
encoder_input_tokens=snake_case , encoder_inputs_mask=snake_case )
a__ , a__ : int = self.continuous_encoder(
encoder_inputs=snake_case , encoder_inputs_mask=snake_case )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]:
"""simple docstring"""
a__ : Union[str, Any] = noise_time
if not torch.is_tensor(snake_case ):
a__ : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0:
a__ : str = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
a__ : Tuple = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
a__ : List[Any] = self.decoder(
encodings_and_masks=snake_case , decoder_input_tokens=snake_case , decoder_noise_time=snake_case )
return logits
@torch.no_grad()
def __call__( self , snake_case , snake_case = None , snake_case = 100 , snake_case = True , snake_case = "numpy" , snake_case = None , snake_case = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(snake_case )}.""" )
a__ : Tuple = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
a__ : int = np.zeros([1, 0, self.n_dims] , np.floataa )
a__ : List[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=snake_case , device=self.device )
for i, encoder_input_tokens in enumerate(snake_case ):
if i == 0:
a__ : Dict = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
a__ : int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=snake_case , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
a__ : str = ones
a__ : Tuple = self.scale_features(
snake_case , output_range=[-1.0, 1.0] , clip=snake_case )
a__ : Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=snake_case , continuous_mask=snake_case , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
a__ : Dict = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=snake_case , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(snake_case )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
a__ : str = self.decode(
encodings_and_masks=snake_case , input_tokens=snake_case , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
a__ : Any = self.scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
a__ : Optional[int] = self.scale_to_features(snake_case , input_range=[-1.0, 1.0] )
a__ : Optional[int] = mel[:1]
a__ : Union[str, Any] = mel.cpu().float().numpy()
a__ : Dict = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case )
logger.info("Generated segment" , snake_case )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
a__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
a__ : Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=snake_case )
| 629 |
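The scale_features/scale_to_features pair above is a plain affine map between the melgan feature range and an output range such as [-1, 1]; in isolation, with the same constants:

import math
import torch

MIN_VALUE = math.log(1e-5)  # matches the pipeline's min_value above
MAX_VALUE = 4.0

def scale_features(x: torch.Tensor, min_out: float = -1.0, max_out: float = 1.0) -> torch.Tensor:
    # Map [MIN_VALUE, MAX_VALUE] linearly onto [min_out, max_out].
    zero_one = (x - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)
    return zero_one * (max_out - min_out) + min_out

def scale_to_features(y: torch.Tensor, min_out: float = -1.0, max_out: float = 1.0) -> torch.Tensor:
    # Exact inverse of scale_features.
    zero_one = (y - min_out) / (max_out - min_out)
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

x = torch.linspace(MIN_VALUE, MAX_VALUE, 5)
assert torch.allclose(scale_to_features(scale_features(x)), x, atol=1e-4)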
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
SCREAMING_SNAKE_CASE__ : List[str] = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase )
SCREAMING_SNAKE_CASE__ : List[str] = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def _A ( lowerCamelCase ):
a__ : Tuple = list(s_dict.keys() )
for key in keys:
a__ : Optional[Any] = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ : Optional[int] = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F"""{key} -> {new_key}""" )
a__ : Dict = s_dict.pop(lowerCamelCase )
return s_dict
def _A ( lowerCamelCase ):
a__ , a__ : Any = emb.weight.shape
a__ : Optional[Any] = nn.Linear(lowerCamelCase , lowerCamelCase , bias=lowerCamelCase )
a__ : Optional[Any] = emb.weight.data
return lin_layer
def _A ( lowerCamelCase , lowerCamelCase ):
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
a__ : Optional[Any] = os.path.basename(lowerCamelCase )
a__ : List[Any] = url.split("/" )[-2]
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if os.path.exists(lowerCamelCase ) and not os.path.isfile(lowerCamelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(lowerCamelCase ):
a__ : Any = open(lowerCamelCase , "rb" ).read()
        if hashlib.sha256(lowerCamelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(lowerCamelCase ) as source, open(lowerCamelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=lowerCamelCase , unit_divisor=1024 ) as loop:
while True:
a__ : Optional[Any] = source.read(8192 )
if not buffer:
break
output.write(lowerCamelCase )
loop.update(len(lowerCamelCase ) )
a__ : Optional[int] = open(lowerCamelCase , "rb" ).read()
if hashlib.shaaaa(lowerCamelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def _A ( lowerCamelCase , lowerCamelCase ):
if ".pt" not in checkpoint_path:
a__ : str = _download(_MODELS[checkpoint_path] )
else:
a__ : str = torch.load(lowerCamelCase , map_location="cpu" )
a__ : Dict = original_checkpoint["dims"]
a__ : Optional[int] = original_checkpoint["model_state_dict"]
a__ : Any = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(lowerCamelCase )
rename_keys(lowerCamelCase )
a__ : Optional[Any] = True
a__ : Optional[Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a__ : Tuple = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCamelCase , decoder_ffn_dim=lowerCamelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
a__ : Optional[Any] = WhisperForConditionalGeneration(lowerCamelCase )
a__ , a__ : Tuple = model.model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
if len(lowerCamelCase ) > 0 and not set(lowerCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a__ : str = proj_out_weights
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
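# Invocation sketch (hypothetical script name and paths; a bare model name like
# "base" resolves through the _MODELS URL table above, while anything containing
# ".pt" is loaded as a local checkpoint):
#   python convert_openai_to_hf.py --checkpoint_path base --pytorch_dump_folder_path ./whisper-base-hf
#   python convert_openai_to_hf.py --checkpoint_path ./my_ckpt.pt --pytorch_dump_folder_path ./out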
| 629 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE__ : str = 5_0 # max width of layer names
SCREAMING_SNAKE_CASE__ : int = 7_0 # max width of quantizer names
def _A ( lowerCamelCase ):
a__ : Any = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=lowerCamelCase , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=lowerCamelCase , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=lowerCamelCase , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=lowerCamelCase , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=lowerCamelCase , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=lowerCamelCase , type=lowerCamelCase , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=lowerCamelCase , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _A ( lowerCamelCase ):
if args.calibrator == "max":
a__ : Optional[int] = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
a__ : Union[str, Any] = "histogram"
elif args.calibrator == "mse":
a__ : Optional[int] = "histogram"
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
a__ : Union[str, Any] = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCamelCase )
a__ : Any = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCamelCase , ["embeddings"] , which="weight" , _disabled=lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(lowerCamelCase , [""] , _disabled=lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCamelCase , args.quant_disable_keyword , _disabled=lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCamelCase , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCamelCase , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(lowerCamelCase , lowerCamelCase )
if args.clip_gelu:
clip_gelu(lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCamelCase )
def _A ( lowerCamelCase ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def _A ( lowerCamelCase , lowerCamelCase ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
def fusea(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCamelCase , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
a__ : Optional[int] = qq._amax.detach().item()
a__ : Optional[Any] = qk._amax.detach().item()
a__ : List[str] = qv._amax.detach().item()
a__ : str = max(lowerCamelCase , lowerCamelCase , lowerCamelCase )
qq._amax.fill_(lowerCamelCase )
qk._amax.fill_(lowerCamelCase )
qv._amax.fill_(lowerCamelCase )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _A ( lowerCamelCase , lowerCamelCase ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
a__ : Any = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCamelCase )
a__ : Dict = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def _A ( lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
a__ : int = mod.weight.shape[0]
a__ : Tuple = mod._weight_quantizer._amax.detach()
a__ : Optional[int] = torch.ones(lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def _A ( lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
a__ : List[str] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
a__ : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
a__ : Optional[Any] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCamelCase , keepdims=lowerCamelCase ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
a__ : Optional[Any] = amax
def _A ( lowerCamelCase , lowerCamelCase=25 , lowerCamelCase=180 , lowerCamelCase=None ):
if ignore is None:
a__ : Union[str, Any] = []
elif not isinstance(lowerCamelCase , lowerCamelCase ):
a__ : Optional[int] = [ignore]
a__ : str = 0
for name, mod in model.named_modules():
if not hasattr(lowerCamelCase , "weight" ):
continue
a__ : Union[str, Any] = max(lowerCamelCase , len(lowerCamelCase ) )
for name, mod in model.named_modules():
a__ : int = getattr(lowerCamelCase , "_input_quantizer" , lowerCamelCase )
a__ : List[Any] = getattr(lowerCamelCase , "_weight_quantizer" , lowerCamelCase )
if not hasattr(lowerCamelCase , "weight" ):
continue
if type(lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(lowerCamelCase ) is str and s in name]:
continue
a__ : Dict = F"""Act:{input_q.extra_repr()}"""
a__ : Union[str, Any] = F"""Wgt:{weight_q.extra_repr()}"""
a__ : str = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(lowerCamelCase ) <= line_width:
logger.info(lowerCamelCase )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def _A ( lowerCamelCase ):
a__ : Any = 0
for name, mod in model.named_modules():
if isinstance(lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : int = getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(lowerCamelCase , lowerCamelCase )
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase="both" , **lowerCamelCase ):
a__ : Dict = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(lowerCamelCase , lowerCamelCase , "_input_quantizer" , lowerCamelCase , lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(lowerCamelCase , lowerCamelCase , "_weight_quantizer" , lowerCamelCase , lowerCamelCase )
logger.info(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(lowerCamelCase , "_input_quantizer" ) or hasattr(lowerCamelCase , "_weight_quantizer" ):
for n in names:
if re.search(lowerCamelCase , lowerCamelCase ):
set_quantizers(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(lowerCamelCase , lowerCamelCase ):
a__ : List[Any] = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
logger.info(lowerCamelCase )
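# Usage sketch (flow assumed; both helpers below are the ones defined in this
# file): after configuring the model for quantization, disable a family of
# quantizers by module-name regex and dump the resulting quantizer table.
#   set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
#   print_quant_summary(model)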
| 629 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : str = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Optional[Any] = """informer"""
_UpperCamelCase : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , snake_case = None , snake_case = None , snake_case = "student_t" , snake_case = "nll" , snake_case = 1 , snake_case = None , snake_case = "mean" , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = 0 , snake_case = None , snake_case = None , snake_case = 64 , snake_case = 32 , snake_case = 32 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = 2 , snake_case = True , snake_case = "gelu" , snake_case = 0.05 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 0.1 , snake_case = 100 , snake_case = 0.02 , snake_case=True , snake_case = "prob" , snake_case = 5 , snake_case = True , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = prediction_length
a__ : Optional[int] = context_length or prediction_length
a__ : Optional[int] = distribution_output
a__ : str = loss
a__ : Optional[Any] = input_size
a__ : int = num_time_features
a__ : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
a__ : Optional[int] = scaling
a__ : List[str] = num_dynamic_real_features
a__ : Optional[int] = num_static_real_features
a__ : Optional[int] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
a__ : List[Any] = cardinality
else:
a__ : Tuple = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
a__ : Tuple = embedding_dimension
else:
a__ : int = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
a__ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
a__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
a__ : Union[str, Any] = d_model
a__ : Any = encoder_attention_heads
a__ : Optional[Any] = decoder_attention_heads
a__ : int = encoder_ffn_dim
a__ : List[Any] = decoder_ffn_dim
a__ : List[str] = encoder_layers
a__ : Any = decoder_layers
a__ : List[str] = dropout
a__ : int = attention_dropout
a__ : List[Any] = activation_dropout
a__ : Optional[int] = encoder_layerdrop
a__ : Tuple = decoder_layerdrop
a__ : Any = activation_function
a__ : Tuple = init_std
a__ : Optional[int] = use_cache
# Informer
a__ : Union[str, Any] = attention_type
a__ : List[str] = sampling_factor
a__ : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case , **snake_case )
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
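    # Worked example for the count above, using this config's defaults (input_size=1,
    # lags_sequence=[1..7], no time/static/dynamic features, cardinality=[0] so
    # sum(embedding_dimension)=0): _number_of_features = 0 + 0 + 0 + 0 + 1*2 = 2,
    # hence feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.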
| 629 | 1 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : Tuple = torch.nn.Linear(10 , 10 )
a__ : List[Any] = torch.optim.SGD(model.parameters() , 0.1 )
a__ : List[str] = Accelerator()
a__ : Optional[int] = accelerator.prepare(snake_case )
try:
pickle.loads(pickle.dumps(snake_case ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 629 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
SCREAMING_SNAKE_CASE__ : List[Any] = """sshleifer/bart-tiny-random"""
SCREAMING_SNAKE_CASE__ : Tuple = """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
return AutoConfig.from_pretrained(snake_case )
def _snake_case ( self ) -> Any:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , *a__ : int = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ , *a__ : Any = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=snake_case )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ , *a__ : List[str] = create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _snake_case ( self ) -> int:
"""simple docstring"""
with self.assertRaises(snake_case ):
create_student_by_copying_alternating_layers(snake_case , tempfile.mkdtemp() , e=snake_case , d=snake_case )
| 629 | 1 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : int = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Optional[int] = AlbertTokenizer
_UpperCamelCase : Dict = AlbertTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[Any] = True
def _snake_case ( self ) -> str:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a__ : List[Any] = AlbertTokenizer(snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , snake_case ) -> Union[str, Any]:
"""simple docstring"""
a__ : Dict = "this is a test"
a__ : Union[str, Any] = "this is a test"
return input_text, output_text
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Dict = "<pad>"
a__ : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(snake_case ) , 30_000 )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def _snake_case ( self ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a__ : int = self.get_tokenizer()
a__ : int = self.get_rust_tokenizer()
a__ : int = "I was born in 92000, and this is falsé."
a__ : Optional[int] = tokenizer.tokenize(snake_case )
a__ : Tuple = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
a__ : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
a__ : Tuple = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
a__ : List[str] = self.get_rust_tokenizer()
a__ : Dict = tokenizer.encode(snake_case )
a__ : Union[str, Any] = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ : Union[str, Any] = AlbertTokenizer(snake_case , keep_accents=snake_case )
a__ : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case ) , [48, 25, 21, 1_289] )
a__ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
a__ : List[Any] = tokenizer.convert_tokens_to_ids(snake_case )
self.assertListEqual(snake_case , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
a__ : Dict = tokenizer.convert_ids_to_tokens(snake_case )
self.assertListEqual(
snake_case , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ : Tuple = AlbertTokenizer(snake_case )
a__ : Optional[int] = tokenizer.encode("sequence builders" )
a__ : Union[str, Any] = tokenizer.encode("multi-sequence build" )
a__ : List[Any] = tokenizer.build_inputs_with_special_tokens(snake_case )
a__ : str = tokenizer.build_inputs_with_special_tokens(snake_case , snake_case )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 629 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-1"""
SCREAMING_SNAKE_CASE__ : Dict = """CompVis/stable-diffusion-v1-2"""
SCREAMING_SNAKE_CASE__ : Tuple = """CompVis/stable-diffusion-v1-3"""
SCREAMING_SNAKE_CASE__ : str = """CompVis/stable-diffusion-v1-4"""
class __lowerCAmelCase ( _UpperCamelCase ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ) -> Any:
"""simple docstring"""
        super().__init__()
a__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(snake_case )
a__ : int = StableDiffusionPipeline(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _snake_case ( self ) -> Dict[str, Any]:
"""simple docstring"""
return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith("_" )}
def _snake_case ( self , snake_case = "auto" ) -> Optional[Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a__ : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Optional[int]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Union[str, Any]:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def _snake_case ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Any = "cuda" if torch.cuda.is_available() else "cpu"
self.to(snake_case )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
a__ : Any = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.2
a__ : List[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.3
a__ : Optional[Any] = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.4
a__ : Dict = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
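    # Usage sketch (hypothetical wiring, mirroring how diffusers community pipelines
    # are typically loaded; the four checkpoint ids are the module-level constants above):
    #   pipe = DiffusionPipeline.from_pretrained(
    #       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
    #   )
    #   out = pipe(prompt="an astronaut riding a horse", num_inference_steps=50)
    #   v1_1, v1_2, v1_3, v1_4 = out.images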
| 629 | 1 |
import baseaa
def _A ( lowerCamelCase ):
return baseaa.baaencode(string.encode("utf-8" ) )
def _A ( lowerCamelCase ):
return baseaa.baadecode(lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = """Hello World!"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = baseaa_encode(test)
print(encoded)
SCREAMING_SNAKE_CASE__ : Optional[int] = baseaa_decode(encoded)
print(decoded)
| 629 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
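# Worked example (Archimedes' principle, F = rho * g * V): fresh water at
# ~997 kg/m^3 displacing 0.5 m^3 under standard gravity gives a buoyant force of
# about 997 * 9.80665 * 0.5 ≈ 4888.6 N.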
| 629 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[str] = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""allegro/herbert-base-cased""": 5_1_4}
SCREAMING_SNAKE_CASE__ : List[str] = {}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Union[str, Any] = HerbertTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case="</s>" , **snake_case , ) -> str:
"""simple docstring"""
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , sep_token=snake_case , **snake_case , )
def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
"""simple docstring"""
a__ : Optional[int] = [self.cls_token_id]
a__ : Optional[Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1]
def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
"""simple docstring"""
a__ : Optional[int] = [self.sep_token_id]
a__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self , snake_case , snake_case = None ) -> Tuple[str]:
"""simple docstring"""
a__ : Union[str, Any] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
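    # Layout sketch for the two methods above (standard pair encoding with the
    # <s>/</s> defaults from __init__): a pair (A, B) is packed as <s> A </s> B </s>,
    # with token_type_ids of 0 over "<s> A </s>" and 1 over "B </s>".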
| 629 |
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Any = Node(lowerCamelCase )
a__ , a__ : List[str] = split(lowerCamelCase , lowerCamelCase )
return merge(merge(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
a__ , a__ : Optional[int] = split(lowerCamelCase , value - 1 )
a__ , a__ : Tuple = split(lowerCamelCase , lowerCamelCase )
return merge(lowerCamelCase , lowerCamelCase )
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
for arg in args.split():
if arg[0] == "+":
a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def _A ( ):
a__ : List[str] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
a__ : int = input()
while args != "q":
a__ : Union[str, Any] = interact_treap(lowerCamelCase , lowerCamelCase )
print(lowerCamelCase )
a__ : Optional[Any] = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
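# REPL sketch for main() above: commands are "+v" to insert and "-v" to erase
# (bare numbers print "Unknown command"). The echoed tree uses __str__, whose
# node order depends on the random priorities; inorder(root) always prints the
# stored values in sorted order.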
| 629 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Tuple = """transfo-xl"""
_UpperCamelCase : Dict = ["""mems"""]
_UpperCamelCase : int = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , snake_case=267_735 , snake_case=[20_000, 40_000, 200_000] , snake_case=1_024 , snake_case=1_024 , snake_case=16 , snake_case=64 , snake_case=4_096 , snake_case=4 , snake_case=False , snake_case=18 , snake_case=1_600 , snake_case=1_000 , snake_case=True , snake_case=True , snake_case=0 , snake_case=-1 , snake_case=True , snake_case=0.1 , snake_case=0.0 , snake_case=True , snake_case="normal" , snake_case=0.01 , snake_case=0.01 , snake_case=0.02 , snake_case=1E-5 , snake_case=0 , **snake_case , ) -> Tuple:
"""simple docstring"""
a__ : Dict = vocab_size
a__ : Optional[int] = []
self.cutoffs.extend(snake_case )
if proj_share_all_but_first:
a__ : int = [False] + [True] * len(self.cutoffs )
else:
a__ : Union[str, Any] = [False] + [False] * len(self.cutoffs )
a__ : Dict = d_model
a__ : Optional[Any] = d_embed
a__ : List[str] = d_head
a__ : Any = d_inner
a__ : str = div_val
a__ : str = pre_lnorm
a__ : List[Any] = n_layer
a__ : Union[str, Any] = n_head
a__ : List[Any] = mem_len
a__ : Optional[int] = same_length
a__ : int = attn_type
a__ : List[Any] = clamp_len
a__ : List[Any] = sample_softmax
a__ : Optional[int] = adaptive
a__ : Optional[Any] = dropout
a__ : Dict = dropatt
a__ : Tuple = untie_r
a__ : List[Any] = init
a__ : Union[str, Any] = init_range
a__ : str = proj_init_std
a__ : Union[str, Any] = init_std
a__ : Dict = layer_norm_epsilon
super().__init__(eos_token_id=snake_case , **snake_case )
@property
def _snake_case ( self ) -> Any:
"""simple docstring"""
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , snake_case ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 629 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Optional[int] = StableUnCLIPPipeline
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_UpperCamelCase : Any = False
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ : Any = 32
a__ : int = embedder_hidden_size
# prior components
torch.manual_seed(0 )
a__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a__ : Optional[Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=snake_case , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
a__ : int = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=snake_case , num_layers=1 , )
torch.manual_seed(0 )
a__ : str = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
a__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case )
a__ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
a__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a__ : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
a__ : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case , layers_per_block=1 , upcast_attention=snake_case , use_linear_projection=snake_case , )
torch.manual_seed(0 )
a__ : Tuple = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00_085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a__ : Optional[int] = AutoencoderKL()
a__ : Any = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , snake_case , snake_case=0 ) -> Dict:
"""simple docstring"""
if str(snake_case ).startswith("mps" ):
a__ : Union[str, Any] = torch.manual_seed(snake_case )
else:
a__ : List[str] = torch.Generator(device=snake_case ).manual_seed(snake_case )
a__ : Any = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ : Dict = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=snake_case )
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : int = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=snake_case )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
a__ : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
a__ : Dict = pipe("anime turle" , generator=snake_case , output_type="np" )
a__ : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case , snake_case )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a__ : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
a__ : Union[str, Any] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : Union[str, Any] = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
a__ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 629 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __lowerCAmelCase ( TensorFormatter[Mapping, """torch.Tensor""", Mapping] ):
def __init__( self , snake_case=None , **snake_case ) -> str:
"""simple docstring"""
super().__init__(features=snake_case )
a__ : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , snake_case ) -> Any:
"""simple docstring"""
import torch
if isinstance(snake_case , snake_case ) and column:
if all(
isinstance(snake_case , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(snake_case )
return column
def _snake_case ( self , snake_case ) -> Dict:
"""simple docstring"""
import torch
if isinstance(snake_case , (str, bytes, type(snake_case )) ):
return value
elif isinstance(snake_case , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
a__ : Optional[Any] = {}
if isinstance(snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
a__ : Optional[Any] = {"dtype": torch.intaa}
elif isinstance(snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
a__ : List[Any] = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(snake_case , PIL.Image.Image ):
a__ : Tuple = np.asarray(snake_case )
return torch.tensor(snake_case , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , snake_case ) -> Tuple:
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(snake_case , "__array__" ) and not isinstance(snake_case , torch.Tensor ):
a__ : Optional[int] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(snake_case , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(snake_case ) for substruct in data_struct] )
elif isinstance(snake_case , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(snake_case ) for substruct in data_struct] )
return self._tensorize(snake_case )
def _snake_case ( self , snake_case ) -> Union[str, Any]:
"""simple docstring"""
return map_nested(self._recursive_tensorize , snake_case , map_list=snake_case )
def _snake_case ( self , snake_case ) -> Mapping:
"""simple docstring"""
a__ : Union[str, Any] = self.numpy_arrow_extractor().extract_row(snake_case )
a__ : str = self.python_features_decoder.decode_row(snake_case )
return self.recursive_tensorize(snake_case )
def _snake_case ( self , snake_case ) -> "torch.Tensor":
"""simple docstring"""
a__ : Any = self.numpy_arrow_extractor().extract_column(snake_case )
a__ : Union[str, Any] = self.python_features_decoder.decode_column(snake_case , pa_table.column_names[0] )
a__ : Optional[int] = self.recursive_tensorize(snake_case )
a__ : Optional[Any] = self._consolidate(snake_case )
return column
def _snake_case ( self , snake_case ) -> Mapping:
"""simple docstring"""
a__ : int = self.numpy_arrow_extractor().extract_batch(snake_case )
a__ : Tuple = self.python_features_decoder.decode_batch(snake_case )
a__ : str = self.recursive_tensorize(snake_case )
for column_name in batch:
a__ : List[Any] = self._consolidate(batch[column_name] )
return batch
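    # Usage sketch (assumed integration point, mirroring the datasets library's
    # public API): this formatter backs dataset.set_format("torch"), and extra
    # keyword arguments flow into torch_tensor_kwargs above.
    #   ds.set_format("torch")        # numeric fields now come back as torch.Tensor
    #   ds[0]; ds["label"]; ds[:8]    # the row, column, and batch paths shown above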
| 629 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : str = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
def _A ( lowerCamelCase = 1000 ):
return sum(e for e in range(3 , lowerCamelCase ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'{solution() = }')
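# Sanity check (classic Project Euler #1 arithmetic): below 10 the multiples of
# 3 or 5 are 3, 5, 6 and 9, so solution(10) == 23.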
| 629 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( lowerCamelCase ):
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCamelCase , lowerCamelCase ) -> bool:
a__ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
a__ : Union[str, Any] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase ) )
# The ratio of the area for circle to square is pi/4.
a__ : Any = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 , ):
return mean(
function_to_integrate(uniform(lowerCamelCase , lowerCamelCase ) ) for _ in range(lowerCamelCase ) ) * (max_value - min_value)
def _A ( lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = 1.0 ):
def identity_function(lowerCamelCase ) -> float:
return x
a__ : int = area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
a__ : int = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print("******************" )
def _A ( lowerCamelCase ):
def function_to_integrate(lowerCamelCase ) -> float:
return sqrt(4.0 - x * x )
a__ : int = area_under_curve_estimator(
lowerCamelCase , lowerCamelCase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 | 1 |
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 9.80665
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase = g ):
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
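# Added worked example: a fully submerged volume of 0.5 m^3 in fresh water
# (fluid_density ~ 1000 kg/m^3) under standard gravity experiences a buoyant
# force of F = rho * g * V = 1000 * 9.80665 * 0.5 ~ 4903.33 N.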
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 629 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Dict = old_name
if "patch_embed" in old_name:
a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
if layer == "0":
a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
elif layer == "1":
a__ : Dict = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
a__ : List[str] = old_name.replace("3" , "convolution2" )
else:
a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
a__ : List[str] = r"\b\d{2}\b"
if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
else:
a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
if int(match[0] ) < 6:
a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
a__ : List[Any] = "intermediate_stages." + trimmed_name
else:
a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
a__ : Any = "last_stage." + trimmed_name
elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
a__ : str = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
a__ : str = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
a__ : Optional[int] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
a__ : Optional[int] = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
a__ : Tuple = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
a__ : Optional[int] = "efficientformer." + new_name
else:
a__ : List[Any] = "efficientformer.encoder." + new_name
return new_name
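# Added illustration of the key mappings the rules above are intended to
# produce (exact keys depend on the checkpoint being converted):
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "dist_head.weight"     -> "distillation_classifier.weight"
#   "head.weight"          -> "classifier.weight"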
def _A ( lowerCamelCase , lowerCamelCase ):
for key in checkpoint.copy().keys():
a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
a__ : Dict = val
return checkpoint
def _A ( ):
a__ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
a__ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return image
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : List[str] = torch.load(lowerCamelCase , map_location="cpu" )["model"]
a__ : str = EfficientFormerConfig.from_json_file(lowerCamelCase )
a__ : int = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase )
a__ : Optional[Any] = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
a__ : Tuple = config.depths[-1] - config.num_metaad_blocks + 1
a__ : Union[str, Any] = convert_torch_checkpoint(lowerCamelCase , lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
a__ : Dict = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
a__ : str = prepare_img()
a__ : Dict = 256
a__ : Union[str, Any] = 224
a__ : List[str] = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
a__ : List[str] = processor(images=lowerCamelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
a__ : List[str] = Compose(
[
Resize(lowerCamelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(lowerCamelCase ),
ToTensor(),
Normalize(lowerCamelCase , lowerCamelCase ),
] )
a__ : List[Any] = image_transforms(lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(lowerCamelCase , lowerCamelCase )
a__ : Optional[int] = model(lowerCamelCase )
a__ : Any = outputs.logits
a__ : Optional[Any] = (1, 1000)
if "l1" in model_name:
a__ : Tuple = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
a__ : int = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
a__ : Optional[Any] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(lowerCamelCase )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=lowerCamelCase , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=lowerCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 629 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Any = ["""input_features""", """attention_mask"""]
def __init__( self , snake_case=80 , snake_case=16_000 , snake_case=80 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case=True , **snake_case , ) -> List[str]:
"""simple docstring"""
super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
a__ : Tuple = num_mel_bins
a__ : Dict = do_ceptral_normalize
a__ : Any = normalize_means
a__ : List[Any] = normalize_vars
a__ : str = True
def _snake_case ( self , snake_case , ) -> np.ndarray:
"""simple docstring"""
a__ : Any = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
a__ : Tuple = torch.from_numpy(snake_case ).unsqueeze(0 )
a__ : Optional[Any] = ta_kaldi.fbank(snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _snake_case ( snake_case , snake_case , snake_case = True , snake_case = True , snake_case = 0.0 , ) -> np.ndarray:
"""simple docstring"""
if normalize_means:
a__ : Optional[Any] = x[:input_length].mean(axis=0 )
a__ : Any = np.subtract(snake_case , snake_case )
if normalize_vars:
a__ : Union[str, Any] = x[:input_length].std(axis=0 )
a__ : Tuple = np.divide(snake_case , snake_case )
if input_length < x.shape[0]:
a__ : str = padding_value
# make sure array is in float32
a__ : str = x.astype(np.floataa )
return x
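    # Added worked example of the utterance-level CMVN above: for
    # x = [[1.], [3.]] with input_length = 2, subtracting the mean (2.0)
    # gives [[-1.], [1.]], and dividing by the std (1.0) leaves it unchanged;
    # positions past input_length are overwritten with padding_value.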
def _snake_case ( self , snake_case , snake_case = None ) -> List[np.ndarray]:
"""simple docstring"""
a__ : List[str] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(snake_case , snake_case , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(snake_case , snake_case )
]
def __call__( self , snake_case , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
a__ : str = isinstance(snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
a__ : Union[str, Any] = is_batched_numpy or (
isinstance(snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(snake_case , np.ndarray ):
a__ : Optional[int] = np.asarray(snake_case , dtype=np.floataa )
elif isinstance(snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a__ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ : Optional[int] = [raw_speech]
# extract fbank features
a__ : List[Any] = [self._extract_fbank_features(snake_case ) for waveform in raw_speech]
# convert into correct format for padding
a__ : str = BatchFeature({"input_features": features} )
a__ : List[Any] = self.pad(
snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
# make sure list is in array format
a__ : Dict = padded_inputs.get("input_features" )
if isinstance(input_features[0] , snake_case ):
a__ : Optional[int] = [np.asarray(snake_case , dtype=np.floataa ) for feature in input_features]
a__ : Union[str, Any] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
a__ : int = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
a__ : Union[str, Any] = (
np.array(snake_case , dtype=np.intaa )
if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
a__ : Tuple = self.normalize(
padded_inputs["input_features"] , attention_mask=snake_case )
if return_tensors is not None:
a__ : Union[str, Any] = padded_inputs.convert_to_tensors(snake_case )
return padded_inputs
| 629 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE__ : Any = {
"""unc-nlp/lxmert-base-uncased""": 5_1_2,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = LxmertTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ) -> Any:
"""simple docstring"""
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
a__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case ) != tokenize_chinese_chars
):
a__ : str = getattr(snake_case , normalizer_state.pop("type" ) )
a__ : Tuple = do_lower_case
a__ : Union[str, Any] = strip_accents
a__ : str = tokenize_chinese_chars
a__ : List[str] = normalizer_class(**snake_case )
a__ : str = do_lower_case
def _snake_case ( self , snake_case , snake_case=None ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _snake_case ( self , snake_case , snake_case = None ) -> List[int]:
"""simple docstring"""
a__ : Tuple = [self.sep_token_id]
a__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
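    # Added illustration of the mask above: a single sequence [CLS] A [SEP]
    # maps to all zeros, while a pair [CLS] A [SEP] B [SEP] maps to zeros
    # over the first segment (special tokens included) and ones over B plus
    # its trailing [SEP].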
def _snake_case ( self , snake_case , snake_case = None ) -> Tuple[str]:
"""simple docstring"""
a__ : Optional[int] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
| 629 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ):
_UpperCamelCase : List[str] = """focalnet"""
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=False , snake_case=[192, 384, 768, 768] , snake_case=[2, 2, 6, 2] , snake_case=[2, 2, 2, 2] , snake_case=[3, 3, 3, 3] , snake_case="gelu" , snake_case=4.0 , snake_case=0.0 , snake_case=0.1 , snake_case=False , snake_case=1E-4 , snake_case=False , snake_case=False , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**snake_case )
a__ : Dict = image_size
a__ : Optional[Any] = patch_size
a__ : int = num_channels
a__ : str = embed_dim
a__ : Union[str, Any] = use_conv_embed
a__ : Union[str, Any] = hidden_sizes
a__ : List[Any] = depths
a__ : List[str] = focal_levels
a__ : Optional[Any] = focal_windows
a__ : Optional[int] = hidden_act
a__ : Union[str, Any] = mlp_ratio
a__ : List[str] = hidden_dropout_prob
a__ : Optional[Any] = drop_path_rate
a__ : Any = use_layerscale
a__ : Optional[int] = layerscale_value
a__ : str = use_post_layernorm
a__ : Optional[Any] = use_post_layernorm_in_modulation
a__ : Union[str, Any] = normalize_modulator
a__ : Any = initializer_range
a__ : Tuple = layer_norm_eps
a__ : Union[str, Any] = encoder_stride
a__ : Dict = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
a__ , a__ : Any = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
| 629 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Optional[int] = """mobilenet_v2"""
def __init__( self , snake_case=3 , snake_case=224 , snake_case=1.0 , snake_case=8 , snake_case=8 , snake_case=6 , snake_case=32 , snake_case=True , snake_case=True , snake_case="relu6" , snake_case=True , snake_case=0.8 , snake_case=0.02 , snake_case=0.001 , snake_case=255 , **snake_case , ) -> int:
"""simple docstring"""
super().__init__(**snake_case )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
a__ : str = num_channels
a__ : Dict = image_size
a__ : Any = depth_multiplier
a__ : str = depth_divisible_by
a__ : Optional[int] = min_depth
a__ : Dict = expand_ratio
a__ : str = output_stride
a__ : Optional[int] = first_layer_is_expansion
a__ : Union[str, Any] = finegrained_output
a__ : Union[str, Any] = hidden_act
a__ : str = tf_padding
a__ : List[Any] = classifier_dropout_prob
a__ : List[Any] = initializer_range
a__ : Optional[Any] = layer_norm_eps
a__ : str = semantic_loss_ignore_index
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Any = version.parse("""1.11""" )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _snake_case ( self ) -> float:
"""simple docstring"""
return 1E-4
| 629 | 1 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE__ : int = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
SCREAMING_SNAKE_CASE__ : Any = logging.WARNING
def _A ( ):
a__ : Any = os.getenv("DATASETS_VERBOSITY" , lowerCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def _A ( ):
return __name__.split("." )[0]
def _A ( ):
return logging.getLogger(_get_library_name() )
def _A ( ):
# Apply our default configuration to the library root logger.
a__ : List[str] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def _A ( ):
a__ : Union[str, Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def _A ( lowerCamelCase = None ):
if name is None:
a__ : Union[str, Any] = _get_library_name()
return logging.getLogger(lowerCamelCase )
def _A ( ):
return _get_library_root_logger().getEffectiveLevel()
def _A ( lowerCamelCase ):
_get_library_root_logger().setLevel(lowerCamelCase )
def _A ( ):
return set_verbosity(lowerCamelCase )
def _A ( ):
return set_verbosity(lowerCamelCase )
def _A ( ):
return set_verbosity(lowerCamelCase )
def _A ( ):
return set_verbosity(lowerCamelCase )
def _A ( ):
a__ : Dict = False
def _A ( ):
a__ : List[str] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __lowerCAmelCase :
def __init__( self , *snake_case , **snake_case ) -> Optional[Any]: # pylint: disable=unused-argument
"""simple docstring"""
a__ : List[Any] = args[0] if args else None
def __iter__( self ) -> Tuple:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , snake_case ) -> Union[str, Any]:
"""simple docstring"""
def empty_fn(*snake_case , **snake_case ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
return
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
class __lowerCAmelCase :
def __call__( self , *snake_case , snake_case=False , **snake_case ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*snake_case , **snake_case )
else:
return EmptyTqdm(*snake_case , **snake_case )
def _snake_case ( self , *snake_case , **snake_case ) -> Dict:
"""simple docstring"""
a__ : str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case , **snake_case )
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE__ : Optional[int] = _tqdm_cls()
def _A ( ):
global _tqdm_active
return bool(_tqdm_active )
def _A ( ):
global _tqdm_active
a__ : str = True
def _A ( ):
global _tqdm_active
a__ : Union[str, Any] = False
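# Added usage sketch (assuming the toggles above are exposed as
# `enable_progress_bar` / `disable_progress_bar`): after disabling, `tqdm`
# hands back the no-op EmptyTqdm wrapper, so loops iterate silently:
#   disable_progress_bar()
#   for _ in tqdm(range(3)):
#       pass  # no progress bar is drawn
#   enable_progress_bar()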
| 629 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
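# Added worked example: with dims = (2, 3), flat_idx = 4 unravels to (1, 1);
# 4 % 3 == 1 fills the last dimension, then 4 // 3 == 1 fills the first.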
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Optional[int] = t.shape[:no_batch_dims]
a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
# _get_minimal_slice_set is inclusive
a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
# Get an ordered list of slices to perform
a__ : str = _get_minimal_slice_set(
lowerCamelCase , lowerCamelCase , lowerCamelCase , )
a__ : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
if not (len(lowerCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
a__ : str = None
if _out is not None:
a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
a__ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
a__ : str = 0
a__ : Any = prepped_outputs
for _ in range(lowerCamelCase ):
# Chunk the input
if not low_mem:
a__ : str = _select_chunk
else:
a__ : Tuple = partial(
_chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
# Run the layer on the chunk
a__ : Any = layer(**lowerCamelCase )
# Allocate space for the output
if out is None:
a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase , lowerCamelCase ):
def assign(lowerCamelCase , lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
assign(lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
a__ : Dict = da[k]
assign(lowerCamelCase , lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
a__ : Dict = xa
elif isinstance(lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
a__ : Dict = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
return out
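# Added usage sketch for the chunking entry point above (called
# `chunk_layer` here purely for illustration): running a module over a
# batch with (8, 16) leading dims in flattened slices of 32 rows bounds
# the activation memory of each call:
#   out = chunk_layer(module_fn, {"x": x}, chunk_size=32, no_batch_dims=2)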
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
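    # Added illustration: with max_chunk_size = 512 and min_chunk_size = 3,
    # the candidate list built above is [3, 4, 8, 16, 32, 64, 128, 256, 516]
    # (the final entry gets +4), and the loop binary-searches for the largest
    # candidate that runs without raising a RuntimeError (e.g. CUDA OOM).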
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
            # No cached chunk size yet, so we have to tune from scratch
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 629 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : int = """upernet"""
def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**snake_case )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
a__ : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(snake_case , snake_case ):
a__ : Optional[int] = backbone_config.get("model_type" )
a__ : str = CONFIG_MAPPING[backbone_model_type]
a__ : str = config_class.from_dict(snake_case )
a__ : int = backbone_config
a__ : Optional[Any] = hidden_size
a__ : Optional[Any] = initializer_range
a__ : Tuple = pool_scales
a__ : Optional[Any] = use_auxiliary_head
a__ : Optional[Any] = auxiliary_loss_weight
a__ : Dict = auxiliary_in_channels
a__ : Optional[int] = auxiliary_channels
a__ : Any = auxiliary_num_convs
a__ : Any = auxiliary_concat_input
a__ : int = loss_ignore_index
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ : Tuple = copy.deepcopy(self.__dict__ )
a__ : Optional[Any] = self.backbone_config.to_dict()
a__ : List[Any] = self.__class__.model_type
return output
| 629 | 1 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
enable_full_determinism()
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = UNetaDModel
_UpperCamelCase : Any = """sample"""
@property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Dict = 4
a__ : Optional[Any] = 3
a__ : str = (32, 32)
a__ : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
a__ : Tuple = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[str] = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
a__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Dict = UNetaDModel
_UpperCamelCase : Union[str, Any] = """sample"""
@property
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = 4
a__ : Union[str, Any] = 4
a__ : Dict = (32, 32)
a__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
a__ : List[Any] = torch.tensor([10] ).to(snake_case )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
return (4, 32, 32)
@property
def _snake_case ( self ) -> int:
"""simple docstring"""
return (4, 32, 32)
def _snake_case ( self ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
a__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self ) -> Dict:
"""simple docstring"""
a__ , a__ : Dict = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(snake_case )
a__ : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , a__ : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=snake_case )
model.to(snake_case )
a__ : Optional[int] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def _snake_case ( self ) -> List[Any]:
"""simple docstring"""
a__ , a__ : int = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=snake_case )
model_accelerate.to(snake_case )
model_accelerate.eval()
a__ : Optional[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ : List[Any] = noise.to(snake_case )
a__ : Optional[int] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
a__ : Any = model_accelerate(snake_case , snake_case )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
a__ , a__ : Tuple = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=snake_case , low_cpu_mem_usage=snake_case )
model_normal_load.to(snake_case )
model_normal_load.eval()
a__ : Any = model_normal_load(snake_case , snake_case )["sample"]
assert torch_all_close(snake_case , snake_case , rtol=1E-3 )
def _snake_case ( self ) -> Optional[int]:
"""simple docstring"""
a__ : str = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(snake_case )
a__ : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a__ : Any = noise.to(snake_case )
a__ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case )
with torch.no_grad():
a__ : Dict = model(snake_case , snake_case ).sample
a__ : str = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a__ : Optional[Any] = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-3 ) )
class __lowerCAmelCase ( _UpperCamelCase ,_UpperCamelCase ,unittest.TestCase ):
_UpperCamelCase : Dict = UNetaDModel
_UpperCamelCase : List[Any] = """sample"""
@property
def _snake_case ( self , snake_case=(32, 32) ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = 4
a__ : Optional[Any] = 3
a__ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case )
a__ : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self ) -> Dict:
"""simple docstring"""
return (3, 32, 32)
@property
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
def _snake_case ( self ) -> int:
"""simple docstring"""
a__ : Tuple = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
a__ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def _snake_case ( self ) -> str:
"""simple docstring"""
a__ , a__ : Any = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=snake_case )
self.assertIsNotNone(snake_case )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(snake_case )
a__ : int = self.dummy_input
a__ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(snake_case )
a__ : List[str] = noise
a__ : Any = model(**snake_case )
assert image is not None, "Make sure output is not None"
@slow
def _snake_case ( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Dict = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(snake_case )
a__ : Optional[Any] = 4
a__ : Any = 3
a__ : Union[str, Any] = (256, 256)
a__ : Union[str, Any] = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
a__ : str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
a__ : Optional[int] = model(snake_case , snake_case ).sample
a__ : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ : Dict = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
a__ : List[Any] = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(snake_case )
a__ : List[Any] = 4
a__ : Union[str, Any] = 3
a__ : str = (32, 32)
a__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(snake_case )
a__ : str = torch.tensor(batch_size * [1E-4] ).to(snake_case )
with torch.no_grad():
a__ : List[Any] = model(snake_case , snake_case ).sample
a__ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a__ : Tuple = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(snake_case , snake_case , rtol=1E-2 ) )
def _snake_case ( self ) -> Tuple:
"""simple docstring"""
pass
| 629 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
SCREAMING_SNAKE_CASE__ : int = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE__ : Dict = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def _A ( lowerCamelCase ):
a__ : Optional[int] = (images / 2 + 0.5).clamp(0 , 1 )
a__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a__ : int = numpy_to_pil(lowerCamelCase )
return images
def _A ( lowerCamelCase ):
if images.ndim == 3:
a__ : Tuple = images[None, ...]
a__ : Dict = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
a__ : str = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
a__ : List[Any] = [Image.fromarray(lowerCamelCase ) for image in images]
return pil_images
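# Added usage note: a float array of shape (batch, H, W, 3) with values in
# [0, 1] becomes a list of RGB PIL images after the * 255 / uint8 rounding
# above, while a trailing channel of size 1 is squeezed into mode "L".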
| 629 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _A ( lowerCamelCase = "isbn/0140328726" ):
a__ : Dict = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
a__ : Dict = F"""{olid} is not a valid Open Library olid"""
raise ValueError(lowerCamelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _A ( lowerCamelCase ):
a__ : Union[str, Any] = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
a__ : Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
a__ : Any = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
a__ : Optional[Any] = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
a__ : Any = ", ".join(lowerCamelCase )
return data
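# Added illustration: for the default olid "isbn/0140328726" (Fantastic Mr
# Fox), the summary maps "Authors" to "Roald Dahl" and joins any remaining
# list values with ", ".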
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE__ : Any = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
SCREAMING_SNAKE_CASE__ : Any = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print("""\n""".join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
| 629 |
# Lint as: python3
import itertools
import os
import re
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""([A-Z]+)([A-Z][a-z])""")
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(R"""([a-z\d])([A-Z])""")
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(R"""(?<!_)_(?!_)""")
SCREAMING_SNAKE_CASE__ : Dict = re.compile(R"""(_{2,})""")
SCREAMING_SNAKE_CASE__ : List[Any] = R"""^\w+(\.\w+)*$"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = R"""<>:/\|?*"""
def _A ( lowerCamelCase ):
a__ : List[str] = _uppercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
a__ : Dict = _lowercase_uppercase_re.sub(r"\1_\2" , lowerCamelCase )
return name.lower()
def _A ( lowerCamelCase ):
a__ : Tuple = _single_underscore_re.split(lowerCamelCase )
a__ : Any = [_multiple_underscores_re.split(lowerCamelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCamelCase ) if n != "" )
def _A ( lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
if os.path.basename(lowerCamelCase ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , lowerCamelCase ):
raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCamelCase )}-{split}"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
a__ : Union[str, Any] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
a__ : Any = os.path.join(lowerCamelCase , lowerCamelCase )
return F"""{filepath}*"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None ):
a__ : List[str] = filename_prefix_for_split(lowerCamelCase , lowerCamelCase )
a__ : Tuple = os.path.join(lowerCamelCase , lowerCamelCase )
if shard_lengths:
a__ : List[str] = len(lowerCamelCase )
a__ : str = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCamelCase )]
if filetype_suffix:
a__ : Optional[Any] = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
a__ : Optional[int] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
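# Added example of the shard naming scheme above: with the prefix
# "squad-train", three shard lengths and filetype_suffix "arrow", the
# generated names run "squad-train-00000-of-00003.arrow" through
# "squad-train-00002-of-00003.arrow".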
| 629 | 1 |
from __future__ import annotations
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
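# Added worked example of the mass-action law n_i**2 = n * p applied above:
# with intrinsic_conc = 1e10 and electron_conc = 1e12, the missing carrier
# concentration is hole_conc = (1e10)**2 / 1e12 = 1e8.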
if __name__ == "__main__":
import doctest
doctest.testmod()
| 629 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Any = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
SCREAMING_SNAKE_CASE__ : Any = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
"""
SCREAMING_SNAKE_CASE__ : List[Any] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions from the key or system files,
mentions whose corresponding coreference chain is of size one
are considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase="dummy_doc" ):
    a__ : int = {doc: key_lines}
    a__ : List[Any] = {doc: sys_lines}
    a__ : str = {}
    a__ : Union[str, Any] = 0
    a__ : List[str] = 0
    a__ : List[str] = 0
    a__ : List[Any] = 0
    a__ : Dict = 0
    a__ : Optional[int] = 0
    a__ , a__ : List[str] = reader.get_doc_mentions(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase )
    key_singletons_num += singletons_num
    if NP_only or min_span:
        a__ : Tuple = reader.set_annotated_parse_trees(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase , lowerCamelCase )
    a__ , a__ : Tuple = reader.get_doc_mentions(lowerCamelCase , sys_doc_lines[doc] , lowerCamelCase )
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        a__ : int = reader.set_annotated_parse_trees(lowerCamelCase , key_doc_lines[doc] , lowerCamelCase , lowerCamelCase )
    if remove_nested:
        a__ , a__ : Tuple = reader.remove_nested_coref_mentions(lowerCamelCase , lowerCamelCase )
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        a__ , a__ : Optional[Any] = reader.remove_nested_coref_mentions(lowerCamelCase , lowerCamelCase )
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    a__ : List[Any] = reader.get_mention_assignments(lowerCamelCase , lowerCamelCase )
    a__ : List[str] = reader.get_mention_assignments(lowerCamelCase , lowerCamelCase )
    a__ : Dict = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            "Number of resulting singleton clusters in the key "
            F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            "files, respectively" )
    return doc_coref_infos


def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    a__ : Any = get_coref_infos(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
    a__ : Union[str, Any] = {}
    a__ : Dict = 0
    a__ : Tuple = 0
    for name, metric in metrics:
        a__ , a__ , a__ : Any = evaluator.evaluate_documents(lowerCamelCase , lowerCamelCase , beta=1 )
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F"""{name}/recall""": recall, F"""{name}/precision""": precision, F"""{name}/f1""": fa} )
        logger.info(
            name.ljust(10 ) , F"""Recall: {recall * 100:.2f}""" , F""" Precision: {precision * 100:.2f}""" , F""" F1: {fa * 100:.2f}""" , )
    if conll_subparts_num == 3:
        a__ : Union[str, Any] = (conll / 3) * 100
        logger.info(F"""CoNLL score: {conll:.2f}""" )
        output_scores.update({"conll_score": conll} )
    return output_scores


def _A ( lowerCamelCase ):
    a__ : Tuple = False
    for line in key_lines:
        if not line.startswith("#" ):
            if len(line.split() ) > 6:
                a__ : Union[str, Any] = line.split()[5]
                if not parse_col == "-":
                    a__ : Optional[Any] = True
                    break
                else:
                    break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _snake_case ( self ) -> int:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" ) ),
                    "references": datasets.Sequence(datasets.Value("string" ) ),
                } ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ] , )

    def _snake_case ( self , snake_case , snake_case , snake_case=True , snake_case=False , snake_case=False , snake_case=False ) -> Union[str, Any]:
        """simple docstring"""
        a__ : Optional[Any] = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            a__ : str = util.check_gold_parse_annotation(snake_case )
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        a__ : Union[str, Any] = evaluate(
            key_lines=snake_case , sys_lines=snake_case , metrics=snake_case , NP_only=snake_case , remove_nested=snake_case , keep_singletons=snake_case , min_span=snake_case , )
        return score
| 629 |
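As the scoring helper above shows, the reported conll_score is just the arithmetic mean of the MUC, B-cubed and CEAFe F1 values scaled to 0-100. A minimal check of that aggregation, mirroring the (conll / 3) * 100 step:

def conll_score(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    # Average the three component F1 values, then scale to a 0-100 score.
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


assert conll_score(1.0, 1.0, 1.0) == 100.0
assert round(conll_score(0.8, 0.7, 0.6), 2) == 70.0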
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ : int = {
    """configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
    """tokenization_cpmant""": ["""CpmAntTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : List[str] = [
        """CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CpmAntForCausalLM""",
        """CpmAntModel""",
        """CpmAntPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 629 | 1 |
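Both import-structure snippets above rely on `typing.TYPE_CHECKING`, a constant that is True for static type checkers and False at runtime, so annotation-only imports cost nothing when the package loads. A small self-contained illustration of that split:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by type checkers such as mypy, skipped entirely at runtime.
    from decimal import Decimal


def to_decimal(value: str) -> "Decimal":
    # Import lazily at call time so module import stays cheap.
    from decimal import Decimal

    return Decimal(value)


print(to_decimal("1.5"))  # 1.5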
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        a__ : int = _modexpt(lowerCamelCase , exponent // 2 , lowerCamelCase ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(lowerCamelCase , exponent - 1 , lowerCamelCase )) % modulo_value


def _A ( lowerCamelCase = 1777 , lowerCamelCase = 1855 , lowerCamelCase = 8 ):
    a__ : List[Any] = base
    for _ in range(1 , lowerCamelCase ):
        a__ : Optional[int] = _modexpt(lowerCamelCase , lowerCamelCase , 10**digits )
    return result


if __name__ == "__main__":
    print(f'{solution() = }')
| 629 |
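The snippet above is a modular tetration: starting from the base, it repeatedly raises the base to the running result modulo 10**digits. Assuming the obfuscated helper is meant to compute (base ** exponent) % modulo_value, Python's built-in three-argument pow gives a self-contained cross-check:

base, height, digits = 1777, 1855, 8

result = base
for _ in range(1, height):
    # pow(b, e, m) performs the same square-and-multiply modular reduction.
    result = pow(base, result, 10**digits)

print(result)  # last 8 digits of the power tower 1777^(1777^(...)) of height 1855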
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


SCREAMING_SNAKE_CASE__ : Dict = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class __lowerCAmelCase ( unittest.TestCase ,_UpperCamelCase ):
    def _snake_case ( self ) -> str:
        """simple docstring"""
        a__ : Optional[int] = load_tool("text-question-answering" )
        self.tool.setup()
        a__ : Dict = load_tool("text-question-answering" , remote=snake_case )

    def _snake_case ( self ) -> Dict:
        """simple docstring"""
        a__ : Optional[Any] = self.tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )

    def _snake_case ( self ) -> Tuple:
        """simple docstring"""
        a__ : List[Any] = self.remote_tool(snake_case , "What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )

    def _snake_case ( self ) -> Any:
        """simple docstring"""
        a__ : Any = self.tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )

    def _snake_case ( self ) -> int:
        """simple docstring"""
        a__ : List[str] = self.remote_tool(text=snake_case , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(snake_case , "launched the BigScience Research Workshop" )
| 629 | 1 |
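A minimal sketch of the tool interface these tests exercise, assuming the transformers agents API as used above: a tool loaded with load_tool is set up once and is then callable with positional or keyword text/question arguments. The passage string below is illustrative, not from the test:

from transformers import load_tool

qa_tool = load_tool("text-question-answering")
qa_tool.setup()

passage = "BLOOM, a multilingual large language model, was announced in 2022."
# The tool extracts the answer span from the passage, as the assertions above expect.
answer = qa_tool(text=passage, question="When was BLOOM announced?")
print(answer)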
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def _A ( lowerCamelCase ):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )


@pytest.fixture
def _A ( lowerCamelCase ):
    class __lowerCAmelCase :
        def __init__( self , snake_case ) -> List[str]:
            """simple docstring"""
            a__ : Optional[int] = metric_id

    class __lowerCAmelCase :
        _UpperCamelCase : str = [MetricMock(_UpperCamelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]

        def _snake_case ( self ) -> Optional[int]:
            """simple docstring"""
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )


@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
    if "tmp_path" in args:
        a__ : str = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(lowerCamelCase , match="https://huggingface.co/docs/evaluate" ):
        func(*lowerCamelCase )
| 629 |
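The parametrized test above checks that the deprecated metric entry points emit a warning pointing at the evaluate library. A self-contained illustration of the pytest.warns pattern it relies on, assuming FutureWarning as the category (the match string is a regular expression, here the same URL the test matches):

import warnings

import pytest


def deprecated_entry_point():
    # Stand-in for load_metric/list_metrics/inspect_metric deprecation behaviour.
    warnings.warn(
        "Metrics are deprecated, use https://huggingface.co/docs/evaluate instead",
        FutureWarning,
    )


def test_warning_is_emitted():
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        deprecated_entry_point()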
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class __lowerCAmelCase ( _UpperCamelCase ):
    @require_torch
    def _snake_case ( self ) -> str:
        """simple docstring"""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : str = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Optional[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : Tuple = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Optional[int] = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Dict = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Tuple = "1"
        a__ : List[Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> List[Any]:
        """simple docstring"""
        a__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        a__ : Optional[Any] = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        a__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        a__ : List[Any] = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(snake_case )
        BertModel.from_pretrained(snake_case )
        BertTokenizer.from_pretrained(snake_case )
        pipeline(task="fill-mask" , model=snake_case )
        # baseline - just load from_pretrained with normal network
        a__ : Tuple = [sys.executable, "-c", "\n".join([load, run, mock] )]
        # should succeed
        a__ : Any = self.get_env()
        a__ : int = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> Tuple:
        """simple docstring"""
        a__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        a__ : Dict = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        a__ : int = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : str = self.get_env()
        a__ : List[str] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # next emulate no network
        a__ : List[Any] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Union[str, Any] = "1"
        a__ : Tuple = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )

    @require_torch
    def _snake_case ( self ) -> Tuple:
        """simple docstring"""
        a__ : Optional[Any] = "\nfrom transformers import pipeline\n "
        a__ : int = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        a__ : Dict = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        a__ : List[str] = self.get_env()
        a__ : Union[str, Any] = "1"
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )]
        a__ : Any = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )

    @require_torch
    def _snake_case ( self ) -> Union[str, Any]:
        """simple docstring"""
        a__ : Any = "\nfrom transformers import AutoModel\n "
        a__ : Any = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        a__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )]
        # should succeed
        a__ : Optional[Any] = self.get_env()
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        a__ : Dict = "1"
        a__ : Union[str, Any] = subprocess.run(snake_case , env=snake_case , check=snake_case , capture_output=snake_case )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("success" , result.stdout.decode() )
| 629 | 1 |
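All of the tests above share one recipe: pre-populate the local cache, then run a child Python process with TRANSFORMERS_OFFLINE=1 (and optionally a socket mock) and assert on its exit code and output. A minimal sketch of that recipe; the model name is the same tiny test checkpoint used above and the one-liner script is illustrative:

import os
import subprocess
import sys

script = (
    "from transformers import BertConfig; "
    "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert'); "
    "print('success')"
)

# TRANSFORMERS_OFFLINE=1 forces from_pretrained to resolve from the local cache.
env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
result = subprocess.run([sys.executable, "-c", script], env=env, capture_output=True)
print(result.returncode, result.stdout.decode())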
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def _A ( ):
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    a__ : List[Any] = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching , "os.path.join" , lowerCamelCase ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def _A ( ):
    assert _test_patching.open is open

    a__ : str = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , "open" , lowerCamelCase ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def _A ( ):
    # pandas.read_csv is not present in _test_patching
    a__ : List[str] = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching , "pandas.read_csv" , lowerCamelCase ):
        pass


def _A ( ):
    # builtins should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    a__ : Optional[int] = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , "len" , lowerCamelCase ) is None
    with patch_submodule(_test_patching , "len" , lowerCamelCase ):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def _A ( ):
    a__ : List[Any] = "__test_patch_submodule_start_and_stop_mock__"
    a__ : Union[str, Any] = patch_submodule(_test_patching , "open" , lowerCamelCase )

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def _A ( ):
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    a__ : Union[str, Any] = "__test_patch_submodule_successive_join__"
    a__ : Optional[Any] = "__test_patch_submodule_successive_dirname__"
    a__ : List[Any] = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , "os.path.join" , lowerCamelCase ):
        with patch_submodule(_test_patching , "os.rename" , lowerCamelCase ):
            with patch_submodule(_test_patching , "os.path.dirname" , lowerCamelCase ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , "os.rename" , lowerCamelCase ):
        with patch_submodule(_test_patching , "os.path.join" , lowerCamelCase ):
            with patch_submodule(_test_patching , "os.path.dirname" , lowerCamelCase ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def _A ( ):
    a__ : Tuple = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , lowerCamelCase ):
        pass
    with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , lowerCamelCase ):
        pass
| 629 |
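The assertions above pin down patch_submodule's contract: every alias of the target inside the module is swapped for the mock within the context and restored afterwards. For comparison, a self-contained standard-library analogue using unittest.mock.patch, which rebinds a single module attribute rather than walking aliases:

import os.path
from unittest.mock import patch

original_join = os.path.join

with patch("os.path.join", lambda *parts: "/".join(parts)):
    # Inside the context, the attribute on os.path is the replacement.
    assert os.path.join("a", "b") == "a/b"

# Outside the context manager the original function is restored.
assert os.path.join is original_join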
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


SCREAMING_SNAKE_CASE__ : List[str] = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        """VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VanForImageClassification""",
        """VanModel""",
        """VanPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    SCREAMING_SNAKE_CASE__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 629 | 1 |
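The try/except OptionalDependencyNotAvailable guard above boils down to a backend probe: register the torch-backed exports only when torch is importable. A self-contained sketch of that probe; find_spec checks availability without actually importing the package:

import importlib.util


def backend_available(name: str) -> bool:
    # True when the package can be imported, without importing it.
    return importlib.util.find_spec(name) is not None


_import_structure = {"configuration": ["Config"]}
if backend_available("torch"):
    _import_structure["modeling"] = ["Model", "PreTrainedModel"]

print(_import_structure)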
def _A ( lowerCamelCase ):
    if not all(char in "01" for char in bin_string ):
        raise ValueError("Non-binary value was passed to the function" )
    if not bin_string:
        raise ValueError("Empty string was passed to the function" )
    a__ : Tuple = ""
    while len(lowerCamelCase ) % 3 != 0:
        a__ : List[str] = "0" + bin_string
    a__ : List[Any] = [
        bin_string[index : index + 3]
        for index in range(len(lowerCamelCase ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        a__ : Dict = 0
        for index, val in enumerate(lowerCamelCase ):
            oct_val += int(2 ** (2 - index) * int(lowerCamelCase ) )
        oct_string += str(lowerCamelCase )
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 629 |
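The grouping routine above converts three binary digits at a time into one octal digit. Python's built-in base conversions give a compact cross-check with the same validation; the two agree for inputs whose leading zeros do not form a whole all-zero group (the grouping version may then keep a leading "0"):

def bin_to_octal(bin_string: str) -> str:
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    if set(bin_string) - {"0", "1"}:
        raise ValueError("Non-binary value was passed to the function")
    # int(..., 2) parses the binary string; format(..., "o") renders it in octal.
    return format(int(bin_string, 2), "o")


assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == "525"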
from PIL import Image


def _A ( lowerCamelCase , lowerCamelCase ):
    def brightness(lowerCamelCase ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
    return img.point(lowerCamelCase )


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        SCREAMING_SNAKE_CASE__ : List[str] = change_brightness(img, 1_0_0)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 629 | 1 |
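The brightness callback above is applied by PIL's Image.point to each channel value. Its formula simplifies algebraically: 128 + level + (c - 128) = c + level, i.e. a uniform shift of every channel. A short check of that simplification:

level = 100
for c in (0, 64, 128, 255):
    # 128 + level + (c - 128) reduces to c + level for every channel value c.
    assert 128 + level + (c - 128) == c + level
print("brightness(c) is just c + level")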