code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case__ : List[Any] = logging.get_logger(__name__)
def _snake_case ( _snake_case : Tuple ):
if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_snake_case ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class snake_case_( a__ ):
__UpperCamelCase = ['''pixel_values''']
def __init__( self : Optional[int] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , **UpperCamelCase_ : Tuple , ):
super().__init__(**UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_5_6}
lowerCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Tuple = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowerCAmelCase : Dict = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
lowerCAmelCase : Any = do_resize
lowerCAmelCase : Union[str, Any] = size
lowerCAmelCase : List[str] = do_center_crop
lowerCAmelCase : int = crop_size
lowerCAmelCase : Dict = resample
lowerCAmelCase : Dict = do_rescale
lowerCAmelCase : Any = rescale_factor
lowerCAmelCase : List[Any] = offset
lowerCAmelCase : Tuple = do_normalize
lowerCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : List[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" in size:
lowerCAmelCase : List[str] = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
lowerCAmelCase : Any = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Union[str, Any] , ):
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : List[str] = image.astype(np.floataa )
if offset:
lowerCAmelCase : Union[str, Any] = image - (scale / 2)
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = to_numpy_array(UpperCamelCase_ )
if do_resize:
lowerCAmelCase : Optional[int] = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ )
if do_center_crop:
lowerCAmelCase : List[str] = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ )
if do_rescale:
lowerCAmelCase : str = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ )
if do_normalize:
lowerCAmelCase : Optional[int] = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ )
lowerCAmelCase : str = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ )
return image
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : List[str] , ):
lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Any = resample if resample is not None else self.resample
lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : str = offset if offset is not None else self.offset
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Any = image_std if image_std is not None else self.image_std
lowerCAmelCase : List[str] = size if size is not None else self.size
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Any = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCAmelCase : List[str] = make_batched(UpperCamelCase_ )
lowerCAmelCase : Dict = [
[
self._preprocess_image(
image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , )
for img in video
]
for video in videos
]
lowerCAmelCase : Optional[Any] = {'''pixel_values''': videos}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 60 | """simple docstring"""
def _lowerCamelCase( a = 1_0_0_0 ):
__a = 3
__a = 0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 1_5 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
lowerCAmelCase__ : int = 1.5
lowerCAmelCase__ : str = int(factor * num_class_images )
lowerCAmelCase__ : Optional[Any] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCAmelCase , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=__UpperCAmelCase )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowerCAmelCase__ : List[Any] = client.query(text=__UpperCAmelCase )
if len(__UpperCAmelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
lowerCAmelCase__ : List[Any] = int(factor * num_images )
lowerCAmelCase__ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCAmelCase , aesthetic_weight=0.1 , )
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Union[str, Any] = tqdm(desc="""downloading real regularization images""" , total=__UpperCAmelCase )
with open(f"""{class_data_dir}/caption.txt""" , """w""" ) as fa, open(f"""{class_data_dir}/urls.txt""" , """w""" ) as fa, open(
f"""{class_data_dir}/images.txt""" , """w""" ) as fa:
while total < num_class_images:
lowerCAmelCase__ : List[Any] = class_images[count]
count += 1
try:
lowerCAmelCase__ : List[Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowerCAmelCase__ : str = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase_ ( ) -> Dict:
lowerCAmelCase__ : Dict = argparse.ArgumentParser("""""" , add_help=__UpperCAmelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCAmelCase , type=__UpperCAmelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCAmelCase , type=__UpperCAmelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__UpperCAmelCase )
return parser.parse_args()
if __name__ == "__main__":
_A = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 212 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _lowerCamelCase :
def __init__( self : str , UpperCamelCase : int , UpperCamelCase : str=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Dict=7 , UpperCamelCase : List[Any]=9 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=32 , UpperCamelCase : str=5 , UpperCamelCase : int=4 , UpperCamelCase : Optional[Any]=37 , UpperCamelCase : Tuple=8 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=0.002 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Any=0 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : Dict=None , UpperCamelCase : str=None , ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : List[str] = encoder_seq_length
lowerCAmelCase__ : Any = decoder_seq_length
# For common tests
lowerCAmelCase__ : Union[str, Any] = self.decoder_seq_length
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : Optional[Any] = use_attention_mask
lowerCAmelCase__ : str = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : int = d_ff
lowerCAmelCase__ : int = relative_attention_num_buckets
lowerCAmelCase__ : Union[str, Any] = dropout_rate
lowerCAmelCase__ : str = initializer_factor
lowerCAmelCase__ : Tuple = eos_token_id
lowerCAmelCase__ : List[str] = pad_token_id
lowerCAmelCase__ : str = decoder_start_token_id
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : Dict = decoder_layers
def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def _lowerCAmelCase ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : int=None , UpperCamelCase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
if attention_mask is None:
lowerCAmelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ : List[str] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase )
if decoder_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
if cross_attn_head_mask is None:
lowerCAmelCase__ : Optional[int] = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ : int = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ : Tuple = self.get_config()
lowerCAmelCase__ : Dict = config.num_attention_heads
lowerCAmelCase__ : Dict = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return config, input_dict
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Any , ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = UMTaModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(
input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , )
lowerCAmelCase__ : Optional[int] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase )
lowerCAmelCase__ : List[Any] = result.last_hidden_state
lowerCAmelCase__ : Any = result.past_key_values
lowerCAmelCase__ : str = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval()
# first forward pass
lowerCAmelCase__ : Optional[int] = model(UpperCamelCase , use_cache=UpperCamelCase )
lowerCAmelCase__ : Any = model(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model(UpperCamelCase , use_cache=UpperCamelCase )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) )
self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase__ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : List[str] = model(UpperCamelCase )["""last_hidden_state"""]
lowerCAmelCase__ : Any = model(UpperCamelCase , past_key_values=UpperCamelCase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase__ : Optional[int] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) )
def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int , ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval()
lowerCAmelCase__ : Dict = model(**UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() )
@require_torch
class _lowerCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
_lowerCamelCase :List[Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowerCamelCase :List[str] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowerCamelCase :Optional[Any] = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowerCamelCase :Dict = True
_lowerCamelCase :Optional[Any] = False
_lowerCamelCase :List[str] = False
_lowerCamelCase :Dict = True
_lowerCamelCase :str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowerCamelCase :Optional[int] = [0.8, 0.9]
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[str] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase )
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ : List[Any] = config_and_inputs[0]
lowerCAmelCase__ : int = UMTaForConditionalGeneration(UpperCamelCase ).eval()
model.to(UpperCamelCase )
lowerCAmelCase__ : List[Any] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ),
}
for attn_name, (name, mask) in zip(UpperCamelCase , head_masking.items() ):
lowerCAmelCase__ : Union[str, Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase__ : Tuple = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase , return_dict_in_generate=UpperCamelCase , **UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase__ : str = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Dict = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=UpperCamelCase ).to(UpperCamelCase )
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=UpperCamelCase , legacy=UpperCamelCase )
lowerCAmelCase__ : int = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowerCAmelCase__ : Union[str, Any] = tokenizer(UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase ).input_ids
# fmt: off
lowerCAmelCase__ : List[Any] = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = model.generate(input_ids.to(UpperCamelCase ) )
lowerCAmelCase__ : int = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowerCAmelCase__ : Any = tokenizer.batch_decode(UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
| 212 | 1 |
"""simple docstring"""
def __a ( __lowerCamelCase ):
if not all(x.isalpha() for x in string ):
raise ValueError("String must only contain alphabetic characters." )
UpperCAmelCase_ : str = sorted(string.lower() )
return len(__lowerCamelCase ) == len(set(__lowerCamelCase ) )
if __name__ == "__main__":
_a = input('Enter a string ').strip()
_a = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 61 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case__( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=7 , __lowercase=3 , __lowercase=1_8 , __lowercase=3_0 , __lowercase=4_0_0 , __lowercase=True , __lowercase=None , __lowercase=True , __lowercase=None , ) -> List[Any]:
lowerCAmelCase_ : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_0}
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : Any = batch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : List[str] = min_resolution
lowerCAmelCase_ : Dict = max_resolution
lowerCAmelCase_ : Tuple = do_resize
lowerCAmelCase_ : Optional[Any] = size
lowerCAmelCase_ : Union[str, Any] = do_center_crop
lowerCAmelCase_ : Optional[Any] = crop_size
def lowercase_ ( self ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__( UpperCAmelCase__, unittest.TestCase ):
    '''Unit tests for the MobileNetV1 image processor (PIL / NumPy / PyTorch inputs).

    NOTE(review): this file is machine-mangled.  Every test method is named
    ``lowercase_`` (later definitions shadow earlier ones) and several names
    (``__lowercase``, ``image_processor``, ``image_inputs``, ``image_processing``,
    ``encoded_images``, ``MobileNetVaImageProcessingTester``) are referenced but
    never bound in this class.  Code is kept byte-for-byte; only comments and
    this docstring were added.
    '''
    # Class under test; None when the vision extra is unavailable.
    SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaImageProcessor if is_vision_available() else None
    # setUp-equivalent: builds the shared tester fixture.
    # NOTE(review): the tester is bound to a throwaway local rather than
    # ``self.image_processor_tester`` — presumably a mangling artifact.
    def lowercase_ ( self ) -> List[str]:
        lowerCAmelCase_ : Union[str, Any] = MobileNetVaImageProcessingTester(self )
    @property
    def lowercase_ ( self ) -> Dict:
        # Kwargs used to construct the processor in each test below.
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor should expose its configuration attributes.
    def lowercase_ ( self ) -> List[Any]:
        lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowercase , '''do_resize''' ) )
        self.assertTrue(hasattr(__lowercase , '''size''' ) )
        self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) )
        self.assertTrue(hasattr(__lowercase , '''crop_size''' ) )
    # from_dict round-trip, with and without keyword overrides.
    def lowercase_ ( self ) -> int:
        lowerCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 2_0} )
        self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
        lowerCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
        self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
    # Placeholder kept to mirror the upstream test-suite layout.
    def lowercase_ ( self ) -> Tuple:
        pass
    # Batched / unbatched processing of PIL images -> (B, C, crop_h, crop_w).
    def lowercase_ ( self ) -> Union[str, Any]:
        # Initialize image_processing
        lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , Image.Image )
        # Test not batched input
        lowerCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    # Same as above, but with NumPy array inputs.
    def lowercase_ ( self ) -> Optional[int]:
        # Initialize image_processing
        lowerCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , np.ndarray )
        # Test not batched input
        lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCAmelCase_ : Tuple = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    # Same as above, but with PyTorch tensor inputs.
    def lowercase_ ( self ) -> Any:
        # Initialize image_processing
        lowerCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase )
        for image in image_inputs:
            self.assertIsInstance(__lowercase , torch.Tensor )
        # Test not batched input
        lowerCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        lowerCAmelCase_ : Dict = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            # NOTE(review): the trailing "| 262 | 0" on the next line is
            # dataset-row metadata fused onto the source line, not Python.
            ) , ) | 262 | 0
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module logger.  fix(review): the mangled original bound the logger to ``__A``
# and then immediately rebound ``__A`` to the archive map below, discarding the
# logger.  The logger now gets its own name; ``__A`` keeps its final (map)
# value, so existing readers of ``__A`` are unaffected.
logger = logging.get_logger(__name__)

# Map of released BlenderbotSmall checkpoints to their hosted config files.
__A = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case ( PretrainedConfig ):
    """Configuration class for BlenderbotSmall seq2seq models.

    fix(review): the mangled original (a) inherited from the undefined name
    ``__snake_case`` — restored to ``PretrainedConfig``, which is imported at
    the top of this file and otherwise unused; (b) declared every ``__init__``
    parameter with the duplicate name ``UpperCamelCase__`` (a SyntaxError) —
    names restored from the HF BlenderbotSmall defaults, which match the
    positional default values here exactly; (c) assigned every value to one
    throwaway local instead of ``self.*``; and (d) gave all three class
    attributes the same colliding name — restored to the attribute names the
    ``PretrainedConfig`` machinery requires.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=5_0_2_6_5,
        max_position_embeddings=5_1_2,
        encoder_layers=8,
        encoder_ffn_dim=2_0_4_8,
        encoder_attention_heads=1_6,
        decoder_layers=8,
        decoder_ffn_dim=2_0_4_8,
        decoder_attention_heads=1_6,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=5_1_2,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class snake_case ( __snake_case ):
    # NOTE(review): machine-mangled ONNX export config for BlenderbotSmall.
    # Every method is named ``lowercase_`` (later defs shadow earlier ones),
    # parameters share the duplicate name ``UpperCamelCase__`` (a SyntaxError),
    # and locals are collapsed into ``__lowerCAmelCase`` so most intermediate
    # results are discarded before the undefined names that read them
    # (``common_inputs``, ``decoder_inputs``, ``batch`` …).  Code is preserved
    # byte-for-byte; only comments and docstrings were added.
    @property
    def lowercase_ ( self : str)-> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axes mapping of the exported model inputs for the current task.'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase: int = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                # With cached keys/values the decoder only sees one new token.
                __lowerCAmelCase: Any = {0: "batch"}
                __lowerCAmelCase: List[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                __lowerCAmelCase: int = {0: "batch", 1: "decoder_sequence"}
                __lowerCAmelCase: Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            __lowerCAmelCase: Any = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                __lowerCAmelCase , __lowerCAmelCase: str = self.num_layers
                for i in range(UpperCamelCase__):
                    __lowerCAmelCase: List[str] = {0: "batch", 2: "past_sequence + sequence"}
                    __lowerCAmelCase: Dict = {0: "batch", 2: "past_sequence + sequence"}
        else:
            __lowerCAmelCase: Dict = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        # NOTE(review): ``common_inputs`` is never bound above (mangling).
        return common_inputs
    @property
    def lowercase_ ( self : List[str])-> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axes mapping of the exported model outputs for the current task.'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase: int = super().outputs
        else:
            __lowerCAmelCase: Dict = super(UpperCamelCase__ , self).outputs
            if self.use_past:
                __lowerCAmelCase , __lowerCAmelCase: Tuple = self.num_layers
                for i in range(UpperCamelCase__):
                    __lowerCAmelCase: Any = {0: "batch", 2: "past_sequence + sequence"}
                    __lowerCAmelCase: Any = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def lowercase_ ( self : List[Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , )-> Mapping[str, Any]:
        '''Build dummy encoder+decoder inputs (and past_key_values) for export tracing.'''
        __lowerCAmelCase: Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # Generate decoder inputs
        __lowerCAmelCase: int = seq_length if not self.use_past else 1
        __lowerCAmelCase: Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        __lowerCAmelCase: Dict = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        __lowerCAmelCase: Tuple = dict(**UpperCamelCase__ , **UpperCamelCase__)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            __lowerCAmelCase , __lowerCAmelCase: str = common_inputs["input_ids"].shape
            __lowerCAmelCase: Any = common_inputs["decoder_input_ids"].shape[1]
            __lowerCAmelCase , __lowerCAmelCase: List[str] = self.num_attention_heads
            __lowerCAmelCase: int = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            # +3 so the mask covers the appended past tokens below.
            __lowerCAmelCase: List[str] = decoder_seq_length + 3
            __lowerCAmelCase: Tuple = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            __lowerCAmelCase: Tuple = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__)] , dim=1)
            __lowerCAmelCase: List[Any] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            __lowerCAmelCase , __lowerCAmelCase: int = self.num_layers
            __lowerCAmelCase: List[str] = min(UpperCamelCase__ , UpperCamelCase__)
            __lowerCAmelCase: Optional[int] = max(UpperCamelCase__ , UpperCamelCase__) - min_num_layers
            __lowerCAmelCase: Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(UpperCamelCase__):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(UpperCamelCase__),
                        torch.zeros(UpperCamelCase__),
                        torch.zeros(UpperCamelCase__),
                        torch.zeros(UpperCamelCase__),
                    ))
            # TODO: test this.
            __lowerCAmelCase: Dict = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(UpperCamelCase__ , UpperCamelCase__):
                common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__), torch.zeros(UpperCamelCase__)))
        return common_inputs
    def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , )-> Mapping[str, Any]:
        '''Build dummy decoder-only (causal-lm) inputs, optionally with past_key_values.'''
        __lowerCAmelCase: str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            __lowerCAmelCase , __lowerCAmelCase: Tuple = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            __lowerCAmelCase: Optional[int] = seqlen + 2
            __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.num_layers
            __lowerCAmelCase , __lowerCAmelCase: Dict = self.num_attention_heads
            __lowerCAmelCase: Tuple = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            __lowerCAmelCase: Tuple = common_inputs["attention_mask"].dtype
            __lowerCAmelCase: Optional[Any] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__)] , dim=1)
            __lowerCAmelCase: List[Any] = [
                (torch.zeros(UpperCamelCase__), torch.zeros(UpperCamelCase__)) for _ in range(UpperCamelCase__)
            ]
        return common_inputs
    def lowercase_ ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , )-> Mapping[str, Any]:
        '''Tokenize a fixed dummy sentence batch for encoder-style tasks.'''
        __lowerCAmelCase: Any = compute_effective_axis_dimension(
            UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        __lowerCAmelCase: str = tokenizer.num_special_tokens_to_add(UpperCamelCase__)
        __lowerCAmelCase: Optional[int] = compute_effective_axis_dimension(
            UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__)
        # Generate dummy inputs according to compute batch and sequence
        __lowerCAmelCase: List[Any] = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        __lowerCAmelCase: List[Any] = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__))
        return common_inputs
    def lowercase_ ( self : Optional[int] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , )-> Mapping[str, Any]:
        '''Dispatch dummy-input generation according to ``self.task``.'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase: List[str] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__)
        elif self.task == "causal-lm":
            __lowerCAmelCase: Optional[int] = self._generate_dummy_inputs_for_causal_lm(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__)
        else:
            __lowerCAmelCase: Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__)
        return common_inputs
    def lowercase_ ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int])-> List[Any]:
        '''Flatten past_key_values using the seq2seq or causal-lm convention.'''
        if self.task in ["default", "seq2seq-lm"]:
            __lowerCAmelCase: Optional[int] = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        else:
            __lowerCAmelCase: Optional[Any] = super(UpperCamelCase__ , self)._flatten_past_key_values_(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
| 108 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a__ ( result , args ) -> str:
    """Compute WER/CER over ``result`` and write them (plus optional per-sample logs).

    fix(review): the mangled original declared both parameters with the same
    name (``__SCREAMING_SNAKE_CASE`` — a SyntaxError) and bound every value to a
    throwaway local while later reading the undefined names ``wer``, ``cer``,
    ``wer_result``, ``cer_result``, ``dataset_id``, ``log_outputs``.  Names are
    restored from those reads.

    result: mapped dataset with "target" and "prediction" columns.
    args:   parsed CLI namespace (dataset/config/split/log_outputs).
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def a__ ( __SCREAMING_SNAKE_CASE ) -> str:
    """Normalize a transcription target: lowercase, strip punctuation, collapse whitespace.

    fix(review): the mangled original read the undefined name ``text`` and
    discarded each loop iteration's result into a throwaway local, so the
    cleanup never took effect.  The multi-space entries of
    ``token_sequences_to_ignore`` were also collapsed by the mangling and are
    restored (three spaces, then two).
    """
    text = __SCREAMING_SNAKE_CASE
    # IMPORTANT: this should correspond to the chars that were ignored during training
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def a__ ( args ) -> Dict:
    """Run ASR inference for ``args.model_id`` over a dataset and log WER/CER.

    fix(review): the mangled original named its parameter
    ``__SCREAMING_SNAKE_CASE`` while the body read ``args``, and bound every
    intermediate (dataset, feature extractor, pipeline, prediction) to a
    throwaway local before reading the undefined names back.  Bindings are
    restored from those reads.
    """
    # load dataset
    # NOTE(review): the mangled call passed an unrelated local as
    # use_auth_token; True matches the upstream eval script — TODO confirm.
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        # NOTE(review): ``normalize_text`` / ``log_results`` are the helpers
        # defined above; their module-level names were mangled to ``a__``.
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    # fix(review): the mangled original bound the parser to ``__A`` while every
    # ``add_argument`` call read the undefined name ``parser``, and finally
    # called the undefined ``main``/``args``.  ``parser`` is bound explicitly;
    # ``__A`` keeps the parsed namespace as before; ``a__`` is the last-defined
    # function above (the evaluation entry point, originally ``main``).
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    __A = parser.parse_args()

    a__(__A)
| 108 | 1 |
# Content injected as the first cell of every auto-generated doc notebook.
# fix(review): the mangled original assigned all three values to the same name
# ``a__`` and then read the undefined ``INSTALL_CONTENT`` (NameError at
# import); the string is now bound explicitly.  The broken ``Tuple`` /
# ``List[Any]`` annotations (typing was never imported, and module-level
# annotations are evaluated) are dropped.  ``a__`` still ends up bound to the
# replacement-pattern map, as in the original.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

a__ = INSTALL_CONTENT
# First notebook cells prepended to generated notebooks.
a__ = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholders the doc builder must not reformat with black.
a__ = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 193 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def A_ ( ):
    """CONNECTION_TIMES_OUT offline mode: requests hang-guard raises our error.

    fix(review): the original passed the undefined name ``_lowerCAmelCase`` to
    ``pytest.raises``; the intended exception is
    ``RequestWouldHangIndefinitelyError`` imported from ``.utils`` at the top
    of this file.  The ``-> List[Any]`` return annotation is dropped because
    ``List``/``Any`` are never imported here (NameError at definition time).
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def A_ ( ):
    """CONNECTION_FAILS offline mode: any request raises requests' ConnectionError.

    fix(review): dropped the ``-> Tuple`` return annotation — ``Tuple`` is never
    imported in this module, so evaluating the annotation at definition time
    would raise NameError.
    """
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def A_ ( ):
    """HF_DATASETS_OFFLINE=1: http_head must fail without touching the network.

    fix(review): the original passed the undefined name ``_lowerCAmelCase`` to
    ``pytest.raises``.  In this simulation mode the offline helper raises a
    ``ConnectionError`` subclass, so the builtin ``ConnectionError`` is used —
    TODO confirm against the ``offline`` helper in ``.utils``.  The broken
    ``-> Optional[int]`` annotation (typing never imported) is dropped.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 52 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class A__ ( SchedulerMixin , ConfigMixin ):
    """Fourth-order Improved Pseudo Numerical Methods (F-PNDM) scheduler.

    fix(review): the mangled original (a) inherited twice from the undefined
    name ``__UpperCAmelCase`` — restored to the ``SchedulerMixin``/
    ``ConfigMixin`` bases imported at the top of this file; (b) declared
    duplicate parameter names in ``__init__``/``set_timesteps``/``step``
    (SyntaxErrors) — restored from the ``self.config.num_train_timesteps`` /
    ``self.config.trained_betas`` reads below; (c) bound all scheduler state
    (``betas``, ``alphas``, ``timesteps``, ``ets`` …) to throwaway locals while
    ``step`` reads them back as ``self.*``; (d) called the nonexistent
    ``torch.atana``/``torch.floataa`` (→ ``torch.atan2``/``torch.float32``);
    and (e) named every method ``__lowercase`` — restored to the standard
    diffusers scheduler API, which ``self.set_timesteps(...)`` in ``__init__``
    already references.
    """

    __A : int = 1  # NOTE(review): presumably the scheduler `order`; mangled name kept.

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1_0_0_0, trained_betas=None):
        # Precompute the full training schedule up front.
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps, device=None):
        """Build betas/alphas and the discrete timestep grid for inference."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output, timestep, sample, return_dict=True):
        """One Adams-Bashforth denoising step; returns the previous sample."""
        if self.num_inference_steps is None:
            raise ValueError(
                'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler')

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep coefficients (orders 1-4).
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample, *args, **kwargs):
        """This scheduler applies no input scaling; return the sample unchanged."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Map the current sample one index down the (alpha, sigma) schedule."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # Guard against alpha == 0 at the final step.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 225 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants (the second binding wins, mirroring the mangled module
# where both shared the name ``lowercase``; presumably MAX_GPU_BATCH_SIZE and
# EVAL_BATCH_SIZE upstream).  fix(review): the ``Optional[int]`` annotations
# are dropped — typing is never imported here and module-level annotations are
# evaluated, so they raised NameError at import.
lowercase = 1_6
lowercase = 3_2


def A_ ( A__ ):
    """Convert a byte count to whole mebibytes (2**20 bytes).

    fix(review): the original body read the undefined name ``x`` instead of its
    parameter ``A__``, and its ``-> Tuple`` annotation referenced an unimported
    name; both are corrected.
    """
    return int(A__ / 2**20)
class A__ :
    """Context manager that measures CUDA memory use of its body.

    Exposes ``begin``/``end``/``peak`` (raw byte counters) and ``used``/
    ``peaked`` (MiB deltas), which the training loop below reads.

    fix(review): the mangled original bound every counter to a throwaway local
    (``a__``) while ``__exit__`` and callers read ``self.begin``/``self.end``/
    ``self.peak``/``self.used``/``self.peaked``; the attribute bindings are
    restored from those reads.  ``bamb`` is the byte→MiB helper defined above
    (its module-level name was mangled).
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def A_ ( accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 ):
    """Build train/eval dataloaders over truncated GLUE/MRPC splits.

    fix(review): the mangled original declared every parameter with the same
    name (``A__`` — a SyntaxError) and discarded intermediates into throwaway
    locals while reading them back as ``datasets``/``tokenized_datasets``.
    Parameter names are restored from the call site
    (``get_dataloaders(accelerator, batch_size, model_name, args.n_train,
    args.n_val)``).  The boolean flags below were mangled to ``A__``;
    truncation=True / load_from_cache_file=False / shuffle True-for-train
    follow the upstream accelerate example — TODO confirm.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue' , 'mrpc' , split={'train': F'train[:{n_train}]', 'validation': F'validation[:{n_val}]'} )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )

    return train_dataloader, eval_dataloader
def A_ ( config , args ):
    """Fine-tune BERT on MRPC while recording peak GPU memory per epoch.

    fix(review): the mangled original declared both parameters as ``A__`` (a
    SyntaxError) and bound every object (accelerator, model, optimizer,
    scheduler, dataloaders, loss …) to a single throwaway local while reading
    the proper names back.  Bindings are restored from those reads.
    ``get_dataloaders`` and ``TorchTracemalloc`` are kept as the source wrote
    them; NOTE(review): their actual definitions in this mangled module carry
    the names ``A_``/``A__`` — confirm against the un-mangled original.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[F'epoch-{epoch}'] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[F'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(train_total_peak_memory , f )
def A_ ( ):
    """Parse CLI arguments and launch the peak-memory training run.

    fix(review): the mangled original bound the parser to a throwaway local
    while every ``add_argument`` call read the undefined name ``parser``, and
    the ``type=``/``default=``/``required=`` values were mangled to ``A__``
    (restored to str/float/int per each argument's default — TODO confirm
    against the upstream accelerate example).
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    # NOTE(review): ``training_function`` is kept as the source wrote it; in
    # this mangled module the trainer's actual definition is named ``A_`` and
    # is shadowed by later defs — confirm against the un-mangled original.
    training_function(config , args )


if __name__ == "__main__":
    # fix(review): the source called the undefined ``main``; ``A_`` is this
    # entry point (the last definition bound to that mangled name).
    A_()
| 225 | 1 |
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class A__:
    """Light-weight wrapper around a DeepSpeed config.

    Accepts a dict, a path to a JSON file, or a base64-encoded JSON string,
    and exposes dotted-path lookups into the nested config (e.g.
    ``get_value("zero_optimization.stage")``).

    NOTE(review): every method of this class had been renamed to the same
    identifier (``UpperCamelCase__``), so later definitions shadowed earlier
    ones and the names the methods call on ``self`` did not exist; the method
    names below are restored to match the internal call sites.
    """

    def __init__(self, config_file_or_dict):
        import base64  # local import: top-of-file import referenced a nonexistent ``baseaa`` module

        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, """r""", encoding="""utf-8""") as f:
                config = json.load(f)
        else:
            try:
                # Last resort: treat the string as url-safe base64-encoded JSON.
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("""utf-8""")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''')
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        """Cache the ZeRO stage and whether any offload device is configured."""
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("""zero_optimization.stage""", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""])
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device"""),
                    self.get_value("""zero_optimization.offload_param.device"""),
                ])
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        """Resolve a dotted key; return ``(parent_node, leaf_key)`` or ``(None, leaf_key)``."""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Return the value at the dotted key, or ``default`` if any level is missing."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Remove the sub-tree at the dotted key; raise only when ``must_exist``."""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''')
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """True iff the key exists and its value is truthy."""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """True iff the key exists and its value is falsy."""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class A__:
    """Thin wrapper that funnels ``backward`` + ``step`` through a deepspeed engine.

    NOTE(review): this file defines several classes all named ``A__``; at
    module scope the later definitions shadow this one — consider renaming.
    """

    def __init__(self, engine):
        # The original body assigned the (undefined) name ``engine`` to a
        # throwaway local and had a duplicate-argument SyntaxError in
        # ``backward``; both are fixed here.
        self.engine = engine

    def backward(self, loss, **kwargs):
        """Run backpropagation (mixed precision handled by the engine), then step."""
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class A__(AcceleratedOptimizer):
    """Optimizer wrapper used when running under DeepSpeed.

    ``step``/``zero_grad`` are no-ops because ``accelerator.backward(loss)``
    (via the engine wrapper) performs them.

    NOTE(review): the original base class name ``__SCREAMING_SNAKE_CASE`` was
    undefined; ``AcceleratedOptimizer`` is the class imported at the top of
    this file for exactly this purpose.
    """

    def __init__(self, optimizer):
        # device placement and grad scaling are the engine's responsibility
        super().__init__(optimizer, device_placement=False, scaler=None)
        # Cached flag: does the wrapped deepspeed optimizer expose ``overflow``?
        self.__has_overflow__ = hasattr(self.optimizer, """overflow""")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether the last optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class A__(AcceleratedScheduler):
    """Scheduler wrapper used when running under DeepSpeed: stepping is a no-op.

    NOTE(review): the original base class name ``__SCREAMING_SNAKE_CASE`` was
    undefined and ``__init__`` had a duplicate-argument SyntaxError; the base
    is the ``AcceleratedScheduler`` imported at the top of this file.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class A__:
    """Placeholder optimizer: records constructor arguments, performs no updates.

    Used when the real optimizer is built by DeepSpeed from its config.
    The original ``__init__`` had every parameter named ``__magic_name__``
    (a SyntaxError) and never set the attributes on ``self``.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params                 # parameter groups, kept verbatim
        self.lr = lr                         # learning rate
        self.weight_decay = weight_decay     # L2 regularisation factor
        self.kwargs = kwargs                 # any extra optimizer kwargs
class A__:
    """Placeholder LR scheduler: records constructor arguments, performs no updates.

    Used when the real scheduler is built by DeepSpeed from its config.
    The original ``__init__`` had duplicate parameter names (a SyntaxError)
    and never set the attributes on ``self``.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer               # the (possibly dummy) optimizer
        self.total_num_steps = total_num_steps   # full schedule length, if known
        self.warmup_num_steps = warmup_num_steps # linear warm-up steps
        self.kwargs = kwargs                     # any extra scheduler kwargs
| 287 |
def _a ( lowerCamelCase ):
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCamelCase : Any = 4
lowerCamelCase : List[str] = (1 << p) - 1
for _ in range(p - 2 ):
lowerCamelCase : List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
    # Demo: M7 = 127 is prime (True), M11 = 2047 = 23 * 89 is not (False).
    # The Lucas-Lehmer test above is named ``_a``; the original guard called
    # an undefined ``lucas_lehmer_test``.
    print(_a(7))
    print(_a(11))
| 287 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (the original bound both of these to the same name ``A``,
# so the logger was immediately clobbered by the archive map).
logger = logging.get_logger(__name__)

# Canonical config URLs for the published RoBERTa checkpoints.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class lowerCamelCase(PretrainedConfig):
    """RoBERTa model configuration.

    NOTE(review): the original base class name ``SCREAMING_SNAKE_CASE__`` was
    undefined (``PretrainedConfig`` is what this file imports for it), every
    ``__init__`` parameter was named ``__magic_name__`` (a SyntaxError), and
    no attribute was ever set on ``self``; parameter names are reconstructed
    from the attribute reads in the original body.
    """

    # Identifier used by the auto-config machinery.
    model_type = '''roberta'''

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase(OnnxConfig):
    """ONNX export configuration for RoBERTa-style models.

    NOTE(review): the original base class name ``SCREAMING_SNAKE_CASE__`` was
    undefined (``OnnxConfig`` is what this file imports for it), the property
    assigned to a throwaway local while reading ``dynamic_axis``, and the
    override must be named ``inputs`` to satisfy the ``OnnxConfig`` contract.
    This class shares its (scrambled) name with the config class above and
    shadows it at module scope — consider renaming.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence; everything else is plain (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCamelCase(unittest.TestCase):
    """Tests for ``BlipaProcessor`` (BLIP-2 processor = BLIP image processor + GPT-2 tokenizer).

    NOTE(review): every method of this class was named ``__A``, so later
    definitions shadowed earlier ones, ``setUp``/``tearDown`` never ran and
    no test was discoverable; names below are restored from the internal
    call sites (``get_tokenizer``, ``prepare_image_inputs``, ...) and the
    unittest lifecycle hooks.
    """

    def setUp(self):
        # Save a fresh processor to a temp dir so the get_* helpers can reload it.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with a single random PIL image (moveaxis: CHW -> HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        # The processor must produce the same pixel values as the bare image processor.
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"

        # The processor must tokenize text exactly like the bare tokenizer.
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 305 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __UpperCamelCase(unittest.TestCase):
    """Fast (dummy-model) test for the ScoreSdeVe pipeline.

    NOTE(review): the property and the test were both named ``lowercase__``
    (the later shadowed the former and ``self.dummy_uncond_unet`` could not
    resolve); names and local bindings are restored from the call sites.
    """

    @property
    def dummy_uncond_unet(self):
        """A tiny deterministic UNet for fast CPU testing."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        # Same seed twice so the dict and tuple outputs must agree.
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type='''numpy''', generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class __UpperCamelCase(unittest.TestCase):
    """Slow integration test for the ScoreSdeVe pipeline with a real checkpoint.

    NOTE(review): this class shares its (scrambled) name with the fast-test
    class above and shadows it at module scope — consider renaming; the test
    method name and local bindings are restored from the call sites.
    """

    def test_inference(self):
        model_id = '''google/ncsnpp-church-256'''
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='''numpy''', generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory; the class below
# references these constants (the originals were all assigned to the same
# name ``UpperCamelCase__`` and therefore undefined where used).
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

# End-of-word marker appended during BPE; "@@ " marks non-final sub-tokens.
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    '''Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (variable-length strings). The
    tokenizer class below calls this helper as ``get_pairs``; the original
    definition used a scrambled name and read an undefined ``word`` local.
    '''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# (referenced by the tokenizer class below; the original assigned this to
# the same scrambled name as the other module constants).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class A(PreTrainedTokenizer):
    """Speech2Text2 tokenizer (BPE with "@@ " continuation markers).

    Can decode without a merges file; encoding requires one.

    NOTE(review): the original base class name ``UpperCAmelCase_`` was
    undefined (``PreTrainedTokenizer`` is what this file imports), every
    method was named ``lowercase_`` (mutual shadowing), ``__init__`` had
    duplicate parameter names (a SyntaxError) and never set attributes on
    ``self``; names below are restored from the tokenizer API and internal
    call sites.
    """

    # Hooks read by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            # Decode-only mode: no BPE ranks available.
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            # Lower rank = higher merge priority.
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to a single token; results are cached."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the best-ranked adjacent pair until none remain in the ranks.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` into BPE sub-tokens; requires a merges file."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding.")

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        """Join sub-tokens, fusing "@@ "-marked continuations back together."""
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json (and merges.txt when available) into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 65 | 0 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True iff ``input_str`` contains every letter a-z (set-based).

    The original definition shared its name with the other pangram helpers
    (shadowing them) and read an undefined local ``frequency``; the name
    restored here matches the benchmark's ``timeit`` setup string.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(' ', '')
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True iff ``input_str`` is a pangram, using a 26-slot flag array.

    The original body set a throwaway local to ``True`` instead of flagging
    the letter's slot, so the result never depended on which letters appeared.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True iff ``input_str`` is a pangram (one-line set comprehension)."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations with ``timeit``.

    The setup string imports the implementations from ``__main__``, which is
    why the function names above must match it exactly (the originals were
    all scrambled to one shared name).
    """
    from timeit import timeit

    setup_code = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
    print(timeit('is_pangram()', setup=setup_code))
    print(timeit('is_pangram_faster()', setup=setup_code))
    print(timeit('is_pangram_fastest()', setup=setup_code))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 45 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, collecting full boards in ``boards``.

    ``possible_board[row] == col`` records the queen positions so far; the two
    collision lists hold the occupied 45-degree and 135-degree diagonals.
    (The original signature repeated one parameter name five times — a
    SyntaxError; names are restored from the recursive call on the last line.)
    """
    # Current row to fill: one queen already sits in each earlier row.
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    """Solve the n-queens puzzle and print every solution plus a count.

    The name is restored from the ``__main__`` guard below; the original
    definition shadowed ``depth_first_search`` by reusing its scrambled name.
    """
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo run: print both solutions of the 4-queens puzzle.
    n_queens_solution(4)
| 45 | 1 |
from __future__ import annotations
A : Optional[int] = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen at (row, column) attacks no existing queen.

    Checks the row, the column and both upward diagonals of the 0/1 matrix
    ``board``. (The original signature repeated one parameter name three
    times — a SyntaxError; names are restored from the body and the call
    site in ``solve``.)
    """
    # Same row.
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # Same column.
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking n-queens search over the 0/1 matrix ``board``.

    Appends each completed board to the module-level ``solution`` list and
    prints it via ``printboard``. (The original signature repeated one
    parameter name — a SyntaxError; names restored from the body and the
    top-level call ``solve(board, 0)``.)
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            # Place, recurse, then undo (backtrack).
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    """Print the 0/1 matrix ``board`` as 'Q' (queen) and '.' (empty) cells."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''')
            else:
                print('''.''', end=''' ''')
        print()
# n=int(input("The no. of queens"))
# Board size and an empty n x n board; the originals were all bound to the
# throwaway name ``A`` while ``solve``/``print`` read ``n``/``board``/
# ``solution``. Solving runs at import time, as in the original script.
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


# Emit a deprecation warning at import time pointing users at the new path.
# (Stray dataset-row artifacts fused to the closing parenthesis were removed.)
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
"""simple docstring"""
def snake_case__ ( __lowerCamelCase : int = 2000000 ):
"""simple docstring"""
lowerCamelCase__ : Dict =[0 for i in range(n + 1 )]
lowerCamelCase__ : List[Any] =1
lowerCamelCase__ : Dict =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __lowerCamelCase ):
lowerCamelCase__ : List[str] =1
lowerCamelCase__ : Optional[Any] =0
for i in range(__lowerCamelCase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
    # The sieve above kept its scrambled name ``snake_case__``; the original
    # guard called an undefined ``solution``.
    print(f'{snake_case__() = }')
| 369 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self : Union[str, Any] )-> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case ( self : str )-> Any:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''', from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''', controlnet=lowerCamelCase, from_pt=lowerCamelCase, dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[int] =controlnet_params
lowerCamelCase__ : Dict ='''bird'''
lowerCamelCase__ : List[str] =jax.device_count()
lowerCamelCase__ : Optional[Any] =pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
lowerCamelCase__ : List[Any] =pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ : Optional[int] =jax.random.PRNGKey(0 )
lowerCamelCase__ : Dict =jax.random.split(lowerCamelCase, jax.device_count() )
lowerCamelCase__ : Tuple =replicate(lowerCamelCase )
lowerCamelCase__ : Tuple =shard(lowerCamelCase )
lowerCamelCase__ : Optional[int] =shard(lowerCamelCase )
lowerCamelCase__ : Tuple =pipe(
prompt_ids=lowerCamelCase, image=lowerCamelCase, params=lowerCamelCase, prng_seed=lowerCamelCase, num_inference_steps=50, jit=lowerCamelCase, ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCamelCase__ : Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCamelCase__ : Any =images[0, 253:256, 253:256, -1]
lowerCamelCase__ : Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCamelCase__ : Dict =jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def snake_case(self):
    """Slow integration test: Flax StableDiffusion + openpose ControlNet.

    NOTE(review): reconstructed from placeholder-obfuscated code — see the
    canny test above; same defects, same fixes.
    """
    controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
    )
    pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
    )
    # the pipeline params do not include the controlnet weights; attach them
    params["controlnet"] = controlnet_params
    prompts = "Chef in the kitchen"
    num_samples = jax.device_count()
    prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
    pose_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
    )
    processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
    rng = jax.random.PRNGKey(0)
    rng = jax.random.split(rng, jax.device_count())
    p_params = replicate(params)
    prompt_ids = shard(prompt_ids)
    processed_image = shard(processed_image)
    images = pipe(
        prompt_ids=prompt_ids,
        image=processed_image,
        params=p_params,
        prng_seed=rng,
        num_inference_steps=50,
        jit=True,
    ).images
    assert images.shape == (jax.device_count(), 1, 768, 512, 3)
    images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
    image_slice = images[0, 253:256, 253:256, -1]
    output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
    expected_slice = jnp.array(
        [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
    )
    print(f"output_slice: {output_slice}")
    assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 272 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds tiny DeiT configs/inputs and runs shape checks for the TF model tests.

    NOTE(review): the original bound every __init__ parameter to one placeholder
    name (a SyntaxError: duplicate arguments) and the body read names that were
    never defined; parameter and method names were reconstructed from the
    attribute assignments. The class is named ``TFDeiTModelTester`` because the
    test class below instantiates it under that name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for API parity with sibling testers; not stored
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __UpperCAmelCase(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF DeiT model variants.

    NOTE(review): base classes, class attributes and method names were
    reconstructed — the original used undefined placeholders for the bases,
    mangled ``__lowercase`` names for the mixin flags, and bound every method
    to the same name, so only the last survived and pytest discovered nothing.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    # DeiT does not support these common-test features.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # the "WithTeacher" variant takes no `labels` kwarg on call(); drop them for it
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def SCREAMING_SNAKE_CASE__():
    """Load the COCO cats fixture image used by the slow integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# The integration test below calls ``prepare_img`` (the upstream name); expose
# it as an alias without dropping the existing binding.
prepare_img = SCREAMING_SNAKE_CASE__
@require_tf
@require_vision
class __UpperCAmelCase(unittest.TestCase):
    """Slow end-to-end check of the distilled DeiT classifier against pinned logits.

    NOTE(review): the original bound both methods to the same placeholder name
    and read undefined locals; names reconstructed from the canonical test.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits against pinned reference values
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 42 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch/cuDNN kernels so the hard-coded expected slices in
# the tests below are reproducible across runs.
enable_full_determinism()
class a__(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for StableDiffusionDiffEditPipeline.

    NOTE(review): reconstructed from placeholder-obfuscated code: the five class
    attributes were all bound to one name (the tester mixins require
    ``pipeline_class``/``params``/``batch_params``/...), every method shared one
    name, method signatures had duplicate parameter names (a SyntaxError), and
    argument placeholders were undefined. ``np.uinta`` fixed to ``np.uint8``.
    """

    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build a dict of tiny randomly-initialized pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Inputs for the main __call__ (inpainting) path."""
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)  # mps has no per-device Generator
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        """Inputs for ``generate_mask``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        """Inputs for ``invert``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()
        # swap both schedulers for the DPM-Solver pair
        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class a__(unittest.TestCase):
    """Slow GPU integration tests for StableDiffusionDiffEditPipeline.

    NOTE(review): in the original, ``setUpClass`` assigned the fixture image to
    throwaway locals, so the later ``self.raw_image`` reads were AttributeErrors;
    it now stores the image on ``cls``. ``torch.floataa`` (nonexistent) fixed to
    ``torch.float16``; duplicate placeholder method names restored.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        # store on the class so every test method can read self.raw_image
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 255 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_lowercase : List[Any] = """http://www.mocksite.com/file1.txt"""
_lowercase : Any = """\"text\": [\"foo\", \"foo\"]"""
_lowercase : int = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class __magic_name__:
    """Minimal stand-in for ``requests.Response`` used by the download tests.

    NOTE(review): the original bound all three class attributes to one
    placeholder name and ``iter_content`` referenced an undefined identifier;
    attribute names restored to what ``requests`` callers read.
    """

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Serve the mock payload as a single UTF-8 chunk (same value as the
        # module-level CONTENT constant); chunk-size kwargs are ignored.
        return [bytes('"text": ["foo", "foo"]', "utf-8")]


# Non-obfuscated alias: the sibling mock-request helper returns ``MockResponse()``.
MockResponse = __magic_name__
def mock_request(*args, **kwargs):
    """Monkeypatch target for ``requests.request``: ignore all arguments and
    always answer with the canned mock response defined above.

    NOTE(review): renamed from a placeholder that was shadowed by later
    same-named functions; the download test patches ``requests.request`` with
    this helper.
    """
    return __magic_name__()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """DownloadManager.download caches payload + metadata for str/list/dict inputs.

    NOTE(review): reconstructed from placeholder-obfuscated code whose duplicate
    parameter names were a SyntaxError; pytest fixture names (``tmp_path``,
    ``monkeypatch``) restored so injection works. Expects the module-level
    ``mock_request`` helper and URL/CONTENT/HASH constants.
    """
    import requests

    # Route every HTTP call to the canned MockResponse.
    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize the three input shapes to parallel lists before checking.
    if isinstance(urls, str):
        downloaded_paths = [downloaded_paths]
        input_urls = [urls]
    elif isinstance(urls, dict):
        assert "train" in downloaded_paths.keys()
        downloaded_paths = downloaded_paths.values()
        input_urls = urls.values()
    assert downloaded_paths
    for downloaded_path, input_url in zip(downloaded_paths, input_urls):
        assert downloaded_path == dl_manager.downloaded_paths[input_url]
        downloaded_path = Path(downloaded_path)
        parts = downloaded_path.parts
        assert parts[-1] == HASH  # cache filename is the hash of the URL
        assert parts[-2] == cache_subdir
        assert downloaded_path.exists()
        downloaded_path_content = downloaded_path.read_text()
        assert downloaded_path_content == CONTENT
        metadata_downloaded_path = downloaded_path.with_suffix(".json")
        assert metadata_downloaded_path.exists()
        metadata_content = json.loads(metadata_downloaded_path.read_text())
        assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """DownloadManager.extract decompresses str/list/dict path inputs into the cache.

    NOTE(review): reconstructed from placeholder-obfuscated code; pytest fixture
    names (``xz_file``, ``text_file``) restored so injection works.
    """
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    # Normalize the three input shapes to parallel lists before checking.
    if isinstance(paths, str):
        extracted_paths = [extracted_paths]
        input_paths = [paths]
    elif isinstance(paths, dict):
        assert "train" in extracted_paths.keys()
        extracted_paths = extracted_paths.values()
        input_paths = paths.values()
    assert extracted_paths
    for extracted_path, input_path in zip(extracted_paths, input_paths):
        assert extracted_path == dl_manager.extracted_paths[input_path]
        extracted_path = Path(extracted_path)
        parts = extracted_path.parts
        # extraction cache filename is derived from the input path (no etag)
        assert parts[-1] == hash_url_to_filename(input_path, etag=None)
        assert parts[-2] == extracted_subdir
        assert extracted_path.exists()
        extracted_file_content = extracted_path.read_text()
        expected_file_content = text_file.read_text()
        assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """Assert that ``file`` yields exactly 4 JSONL records with the expected columns.

    NOTE(review): renamed to ``_test_jsonl`` — the archive tests below already
    call it under that name, and the original duplicate-parameter signature was
    a SyntaxError.
    """
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    """iter_archive yields each member of a (tar|zip) archive of 2 jsonl files."""
    # ``request`` is the pytest fixture used to resolve the parametrized fixture name.
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    """iter_archive can be nested: an archive containing one inner archive of 2 jsonl files."""
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        # iterate the inner archive's file object, not the outer path
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """iter_files lists only the two visible files, skipping hidden ones.

    NOTE(review): the fixture parameter name was an obfuscation placeholder;
    restored so pytest can inject the fixture.
    """
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 351 | '''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# NOTE(review): the three module constants below are all bound to the same name
# ``_lowercase`` — each assignment shadows the previous one, so only the dtype
# list survives. They appear to be distinct constants upstream (an optional
# compression-formats list, the native byte-order prefix, and the dtypes whose
# values survive an image save/load round-trip); confirm intended names.
_lowercase : Optional[List[str]] = None
# numpy dtype byte-order prefix for this host: "<" little-endian, ">" big-endian.
_lowercase : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_lowercase : Optional[int] = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class __magic_name__ :
UpperCamelCase__ = True
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = "PIL.Image.Image"
UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase)
def __call__( self : Tuple ):
return self.pa_type
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : int = np.array(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase_ , lowercase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase_ )
elif isinstance(lowercase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
lowercase_ : Union[str, Any] = {}
lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowercase_ ):
lowercase_ : int = PIL.Image.open(lowercase_ )
else:
lowercase_ : str = path.split("""::""" )[-1]
try:
lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ )
except ValueError:
lowercase_ : str = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowercase_ : Dict = BytesIO(f.read() )
lowercase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def SCREAMING_SNAKE_CASE_ ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowercase_ : Optional[int] = storage.field("""bytes""" )
else:
lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowercase_ : Dict = storage.field("""path""" )
else:
lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowercase_ : Optional[int] = pa.array(
[encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Tuple = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(lowercase_ : Optional[Any] ):
with xopen(lowercase_ , """rb""" ) as f:
lowercase_ : int = f.read()
return bytes_
lowercase_ : Optional[Any] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase_ : Any = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase_ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes:
lowercase_ : Tuple = BytesIO()
if image.format in list_image_compression_formats():
lowercase_ : int = image.format
else:
lowercase_ : int = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(UpperCAmelCase__ , format=UpperCAmelCase__ )
return buffer.getvalue()
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict:
if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
lowercase_ : List[Any] = array.dtype
lowercase_ : int = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowercase_ : Dict = dtype.kind
lowercase_ : List[Any] = dtype.itemsize
lowercase_ : Any = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase_ : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowercase_ : str = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowercase_ : str = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ )
lowercase_ : Optional[Any] = np.dtype(UpperCAmelCase__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
lowercase_ : Optional[int] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) )
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
lowercase_ , lowercase_ : Dict = first_non_null_value(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(UpperCAmelCase__ , np.ndarray ):
lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ )
return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
elif isinstance(UpperCAmelCase__ , PIL.Image.Image ):
lowercase_ : int = no_op_if_value_is_null(UpperCAmelCase__ )
return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
else:
return objs
else:
return objs
| 21 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
# Configure transformers logging so conversion progress is printed.
logging.set_verbosity_info()
_UpperCamelCase : List[Any] = logging.get_logger(__name__)
# Mapping from fairseq/unilm WavLM parameter-name fragments to the corresponding
# HF WavLMModel parameter names. "*" is a placeholder for the encoder layer
# index, filled in while walking the fairseq state dict.
# NOTE(review): all three module-level constants here are bound to the same
# name `_UpperCamelCase` (each rebinding shadows the previous one), while later
# code refers to `logger`, `MAPPING` and `TOP_LEVEL_KEYS` — this looks like
# automated-renaming damage; verify against the upstream conversion script.
_UpperCamelCase : Dict = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Parameters that live at the top level of the HF model (heads / quantizer
# pieces) rather than under the bare encoder.
_UpperCamelCase : List[str] = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def _SCREAMING_SNAKE_CASE ( hf_pointer, key, value, full_name, weight_type ):
    '''Copy `value` into the tensor of `hf_pointer` addressed by dotted `key`.

    hf_pointer: HF module (or parameter) to start the attribute walk from.
    key: dotted attribute path, e.g. "encoder.layers.0.attention.k_proj".
    value: source tensor from the fairseq state dict.
    full_name: original fairseq parameter name, used only for messages.
    weight_type: one of "weight", "weight_g", "weight_v", "bias" or None
        (None means `hf_pointer` itself is the parameter to overwrite).

    Fixes the obfuscated original, which declared five identically named
    parameters (a SyntaxError) and assigned every tensor to a throwaway
    local instead of writing it into the model.
    '''
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    # Shape-check the destination before writing anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    # `logger` is the module-level logger defined above.
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _SCREAMING_SNAKE_CASE ( fairseq_model, hf_model ):
    '''Port every tensor of a fairseq WavLM model into the HF WavLMModel.

    fairseq_model: original unilm/fairseq WavLM module (source of weights).
    hf_model: target transformers WavLMModel (mutated in place).

    Feature-extractor conv weights are delegated to `load_conv_layer`; all
    other tensors are renamed via MAPPING and written with `set_recursively`.
    NOTE(review): those two helpers and MAPPING appear above under
    placeholder names from automated renaming — confirm they resolve.
    '''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The encoder layer index sits just before the matched fragment.
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def _SCREAMING_SNAKE_CASE ( full_name, value, feature_extractor, unused_weights, use_group_norm ):
    '''Copy one fairseq "conv_layers.*" tensor into the HF feature extractor.

    full_name: full fairseq parameter name (parsed for layer index / type).
    value: tensor to copy.
    feature_extractor: HF feature extractor module (mutated in place).
    unused_weights: list collecting unmapped names (mutated in place).
    use_group_norm: True when the config uses group norm on the first layer.

    Fixes the obfuscated original, which declared duplicate parameter names
    and never wrote any tensor into the model; also repairs the assert
    messages, which indexed `feature_extractor[layer_id]` directly.
    '''
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )  # which conv layer
    type_id = int(items[1] )  # 0 -> conv weights, 2 -> norm weights
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    # Norm weights exist on every layer with layer norm, but only on the
    # first layer when group norm is used.
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( checkpoint_path, pytorch_dump_folder_path, config_path=None ):
    '''Convert a fairseq/unilm WavLM checkpoint into a saved HF WavLMModel.

    checkpoint_path: path to the fairseq checkpoint (torch.load-able dict
        with "cfg" and "model" entries).
    pytorch_dump_folder_path: output directory for the converted HF model.
    config_path: optional path to an HF config.json; defaults to WavLMConfig().

    Fixes the obfuscated original, which declared duplicate parameter names
    and reused one local for checkpoint/cfg/model/config.
    NOTE(review): `recursively_load_weights` is defined above under a
    placeholder name from automated renaming — confirm it resolves.
    '''
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['cfg'] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model, hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI: fairseq checkpoint in, HF model directory out, optional HF config override.
    _UpperCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    _UpperCamelCase : Dict = parser.parse_args()
    # NOTE(review): the parser and parsed args are bound to `_UpperCamelCase` but
    # used as `parser`/`args`, and `convert_wavlm_checkpoint` is not defined under
    # that name above — looks like automated-renaming damage; verify against the
    # upstream conversion script.
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 220 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _SCREAMING_SNAKE_CASE ( __snake_case : List[Any] ):
'''simple docstring'''
lowercase = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[str] ):
'''simple docstring'''
lowercase = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
'''simple docstring'''
lowercase = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', 'stage2.cls_token') )
return token
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def _SCREAMING_SNAKE_CASE ( cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path ):
    '''Convert an original CvT checkpoint into a saved HF CvtForImageClassification.

    cvt_model: model name (e.g. "cvt-13", "cvt-21", "cvt-w24") — its digits
        select the per-stage depths.
    image_size: input image size stored on the image processor.
    cvt_file_name: path to the original .pth checkpoint.
    pytorch_dump_folder_path: output directory for model + image processor.

    Fixes the obfuscated original, which declared duplicate parameter names
    (a SyntaxError), collapsed id2label/label2id/config/model/weights into one
    reused local, wrote `int(__snake_case)` where the dict key `k` was meant,
    and never actually set the depth/num_heads/embed_dim attributes on the
    config.
    NOTE(review): `cls_token`, `embeddings`, `attention` and `final` are the
    helper functions defined above under placeholder names from automated
    renaming — confirm they resolve.
    '''
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    repo_id = 'huggingface/label-files'
    # Build the ImageNet-1k label maps from the shared hub dataset.
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset' ) ), 'r' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels, idalabel=idalabel, labelaid=labelaid )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    # Assemble the full (HF name, original name) rename table stage by stage.
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    # Copy each original tensor under its HF name, then load and save.
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # CLI: model name + image size + original .pth checkpoint in, HF model directory out.
    _UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help='Name of the cvt model you\'d like to convert.',
    )
    parser.add_argument(
        '--image_size',
        default=3_8_4,
        type=int,
        help='Input Image Size',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Input Image Size',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    _UpperCamelCase : Tuple = parser.parse_args()
    # NOTE(review): the parser and parsed args are bound to `_UpperCamelCase` but
    # used as `parser`/`args`, and `convert_cvt_checkpoint` is not defined under
    # that name above — looks like automated-renaming damage; verify against the
    # upstream conversion script.
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 220 | 1 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to a small SentencePiece fixture model (no BOS token) used by the test classes below.
_UpperCAmelCase : List[str] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class lowercase ( _UpperCamelCase , unittest.TestCase ):
    # Test suite for the slow/fast Pegasus tokenizers (google/pegasus-large).
    # NOTE(review): several lines reference `_SCREAMING_SNAKE_CASE` (and a
    # `tokenizer` local that was never assigned in setUp) — this looks like
    # automated-renaming damage of the fixture path / method arguments;
    # verify against the upstream test file.
    __SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusTokenizer
    __SCREAMING_SNAKE_CASE : List[Any] = PegasusTokenizerFast
    __SCREAMING_SNAKE_CASE : Union[str, Any] = True
    __SCREAMING_SNAKE_CASE : int = True
    def a ( self ):
        # Build a slow tokenizer from the SentencePiece fixture and persist it
        # so from_pretrained-based tests can reload it from tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        snake_case_ = PegasusTokenizer(_SCREAMING_SNAKE_CASE )
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def a ( self ):
        # Full pretrained tokenizer, downloaded once per test run.
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    def a ( self , **snake_case ):
        # Reload the tokenizer saved in setUp.
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    def a ( self , snake_case ):
        # (input, expected output) pair used by the common tokenizer tests.
        return ("This is a test", "This is a test")
    def a ( self ):
        # "</s>" is id 1 in the fixture vocabulary; check both directions.
        snake_case_ = '</s>'
        snake_case_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
    def a ( self ):
        # Sanity-check ordering and size of the fixture vocabulary.
        snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1103 )
    def a ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def a ( self ):
        # Slow and fast tokenizers must produce identical ids, including for
        # the special mask/unk tokens embedded in the raw string.
        snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
        snake_case_ = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids[0]
        snake_case_ = py_tokenizer([raw_input_str] , return_tensors=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids[0]
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def a ( self ):
        snake_case_ = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        snake_case_ = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        snake_case_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        snake_case_ = tokenizer([raw_input_str] , return_tensors=_SCREAMING_SNAKE_CASE ).input_ids[0]
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    def a ( self ):
        # Pin the public constants of the pretrained tokenizer (vocab size,
        # special-token ids, offset, max length) and one known encoding.
        snake_case_ = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        snake_case_ = 'To ensure a smooth flow of bank resolutions.'
        snake_case_ = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        snake_case_ = tokenizer([raw_input_str] , return_tensors=_SCREAMING_SNAKE_CASE ).input_ids[0]
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def a ( self ):
        # Batched encoding with padding/truncation to the model max length.
        snake_case_ = ['This is going to be way too long.' * 150, 'short example']
        snake_case_ = ['not super long but more than 5 tokens', 'tiny']
        snake_case_ = self._large_tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
        snake_case_ = self._large_tokenizer(
            text_target=_SCREAMING_SNAKE_CASE , max_length=5 , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(_SCREAMING_SNAKE_CASE ) == 2  # input_ids, attention_mask.
    @slow
    def a ( self ):
        # Integration test against a pinned revision of bigbird-pegasus.
        # fmt: off
        snake_case_ = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for the BigBird-Pegasus variant of PegasusTokenizer.

    NOTE(review): the original block bound every class attribute and every
    method to a single placeholder name, so only the last definition survived
    and unittest could not discover any test. Names below are restored from
    the call sites still visible in the bodies (``self.tokenizer_class``,
    ``self.rust_tokenizer_class``, ``self._large_tokenizer``).
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing.
        # NOTE(review): assumes SAMPLE_VOCAB is the module-level fixture path —
        # confirm against the top of the file (outside this view).
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        # Checkpoint exercised by the integration-style tests below.
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        # The slow and fast tokenizers must agree on special-token handling.
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['This is going to be way too long.' * 1000, 'short example']
        tgt_texts = ['not super long but more than 5 tokens', 'tiny']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='pt')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='pt'
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Token ids must match those produced by the original TF tokenizer."""
        test_str = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 368 |
# Lint as: python3
# Utilities for turning dataset names into canonical on-disk file names.
import itertools
import os
import re


# "ABCdef"-style boundaries: split a run of capitals from a following word,
# and a lowercase/digit from a following capital.
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

# Split on single underscores only; runs of 2+ underscores stay together.
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

# Valid split names: dot-separated word characters, e.g. "train" or "train.clean".
_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """Snake-cased file prefix for a dataset name; rejects path-like input."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """File prefix ``<name_prefix>-<split>``; validates both components."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Glob pattern matching every shard file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Concrete file names for a split, sharded (``-XXXXX-of-NNNNN``) or not."""
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 200 | 0 |
class Node:
    """A named value used as a min-heap entry."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        # Heap ordering is by value only.
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap of ``Node`` objects.

    Two auxiliary maps are kept in sync with the heap array:
      * ``idx_of_element``: Node -> index in ``self.heap`` (O(1) lookup for
        ``decrease_key``);
      * ``heap_dict``: node name -> current value (``heap["name"]`` access).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place (Floyd's bottom-up construction)."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        # Sift down every internal node, from the last parent up to the root.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds below it."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest == idx:
                break
            # Swap the nodes, then swap their recorded indices.
            array[idx], array[smallest] = array[smallest], array[idx]
            self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                self.idx_of_element[array[smallest]],
                self.idx_of_element[array[idx]],
            )
            idx = smallest

    def sift_up(self, idx):
        """Move heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node, restoring the heap property."""
        # Move the minimum to the end, pop it, then repair from the root.
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 186 |
def simplify(current_set):
    """One round of Gaussian elimination, applied recursively.

    Divides every row by its leading coefficient, subtracts each row from the
    first to cancel the leading term, then recurses on the remaining
    sub-matrix until rows reduce to 3-wide ``[1, coeff, constant]`` form.
    NOTE: mutates the rows of ``current_set`` in place (the copy is shallow).

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        # Re-attach the first column and the head row after the recursion.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations):
    """Solve n simultaneous linear equations in n unknowns.

    Each row is ``[c1, c2, ..., cn, constant]``. Returns the solution vector
    ``[x1, ..., xn]`` rounded to 5 decimal places.

    >>> solve_simultaneous([[4, 2]])
    [0.5]

    Raises:
        IndexError: wrong number of rows or wrong row length.
        ValueError: non-numeric entries, or no zero-free pivot row available.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        # Move a row with no zeros to the front so division is always safe.
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    # Back-substitute from the most-reduced row upward.
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 186 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Strings whose appearance in a generation marks the end of a function body.
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenized HumanEval prompts.

    Yields each of the first ``n_tasks`` prompts ``n_copies`` times, so a
    dataloader with ``batch_size=1`` fans the same task out to several
    generation passes.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Stop generation once every sequence has produced an EOF string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # Only inspect tokens generated after the prompt.
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Drop the last (usually incomplete) block of generated code."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate ``batch_size`` code completions per task.

    Returns a list of length ``n_tasks``; entry ``i`` holds the decoded,
    EOF-trimmed completions for task ``i``.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # NOTE(review): LHS restored from the upstream codeparrot script —
            # the stopping criterion must ignore the prompt tokens.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Run HumanEval generation + code_eval scoring end to end."""
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 223 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    """Configuration for the Salesforce CTRL model.

    Defaults reproduce the released CTRL architecture (48 layers, 1280-dim
    embeddings, 246534-token vocabulary).
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic HF attribute names onto CTRL's historical field names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # inner dimension of the feed-forward blocks
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 223 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    """Configuration for the Mask2Former universal segmentation model.

    Falls back to a default Swin backbone configuration when none is given.
    """

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # Exposed for code that expects the generic `num_hidden_layers` name.
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs) -> Dict:
        """Instantiate a Mask2FormerConfig from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance (and its nested backbone config) to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 325 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF parameter path (wav2vec2 encoder).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model rather than the encoder.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the submodule of ``hf_pointer`` addressed by dotted ``key``.

    ``weight_type`` selects which parameter of the resolved module receives the
    value ("weight", "weight_g", "weight_v", "bias", or None for the module's
    own ``data``). Shapes are asserted to match before assignment.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Load fairseq wav2vec2 encoder weights into the HF Wav2Vec2 model.

    Returns the encoder->decoder projection weight when present (needed when
    the encoder and decoder dimensions differ), else None.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv/layer-norm parameter from fairseq.

    ``type_id`` 0 addresses the convolution itself; ``type_id`` 2 addresses its
    layer norm (only layer 0 when group norm is used). Anything else is
    recorded in ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares its weights with ``emb``.

    Used to tie the output projection to the embedding matrix: the resulting
    layer maps hidden states of size ``emb_size`` to ``vocab_size`` logits.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id vocabulary from a fairseq dict file.

    Each line of the file is ``<token> <count>``; tokens are assigned ids
    starting at 4, after the reserved specials ``<s>``/``<pad>``/``</s>``/``<unk>``.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak a fairseq wav2vec2 + speech-to-text-2 checkpoint into the
    transformers SpeechEncoderDecoderModel layout and save everything (model,
    tokenizer vocab, feature extractor) to ``pytorch_dump_folder_path``.

    NOTE(review): the obfuscation had collapsed every local binding to
    ``__lowercase`` and all parameters to one name; names below are restored
    from the later references and from the call site at the bottom of the file.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path,
        vocab_size=vocab_size,
        decoder_layers=num_decoder_layers,
        do_stable_layer_norm=True,
    )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    # Load the fairseq ensemble; the data dir is the folder containing dict_path.
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(
        model.decoder.state_dict(), strict=False
    )

    # set output linear layer: embed_out is loaded manually into the LM head.
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Patch token ids / class names into the combined config before saving.
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
# CLI entry point: parse arguments and run the checkpoint conversion.
# Fix: the parser and the parse_args() result were bound to throwaway names
# while the code below referenced `parser` / `args`.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 325 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
# t5x -> transformers key-name substitutions for SwitchTransformers weights.
# (Renamed from a throwaway binding: the rename_keys() loop below reads this
# mapping by the name MOE_LAYER_NAME_MAPPING.)
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    '/attention/': '/0/SelfAttention/',
    '/self_attention/': '/0/SelfAttention/',
    '/encoder_decoder_attention/': '/1/EncDecAttention/',
    'value': 'v',
    'query': 'q',
    'key': 'k',
    'out': 'o',
    'pre_self_attention_layer_norm': '0/layer_norm',
    'pre_cross_attention_layer_norm': '1/layer_norm',
    'pre_attention_layer_norm': '0/layer_norm',  # previously 1, but seems wrong
    'token_embedder': 'shared',
    'encoder_norm': 'final_layer_norm',
    'decoder_norm': 'final_layer_norm',
    'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
    'router/router_weights/w/': 'router/classifier/',
    'roer/roer_weights/w/': 'router/classifier/',
    'logits_dense': 'lm_head',
}
def rename_keys(s_dict):
    """Rename flat t5x checkpoint keys in ``s_dict`` to the HF layout, in place.

    Steps: (1) ``layers_{x}`` -> ``block/{x}/layer``, (2) classic name
    substitutions via MOE_LAYER_NAME_MAPPING, (3) split stacked expert weights
    into per-expert keys. Returns the (mutated) dict.
    NOTE(review): the per-expert key format below is restored from the
    canonical SwitchTransformers conversion script; the obfuscated source had
    replaced the nested f-string with a literal placeholder.
    """
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r'layers_(\d+)', r'block/\1/layer', new_key)

        layer_to_block_of_layer = r'(encoder|decoder)\/'
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            # Encoder MLP lives at sub-layer 1, decoder MLP at sub-layer 2
            # (the decoder has an extra cross-attention sub-layer).
            if groups[0] == "encoder":
                new_key = re.sub(r'/mlp/', r'/1/mlp/', new_key)
                new_key = re.sub(r'/pre_mlp_layer_norm/', r'/1/layer_norm/', new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r'/mlp/', r'/2/mlp/', new_key)
                new_key = re.sub(r'/pre_mlp_layer_norm/', r'/2/layer_norm/', new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)

    # Relative attention bias is stored transposed in t5x.
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict['decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert tensor
    # into one key per expert.
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
# gin-config parameter name -> SwitchTransformersConfig attribute name.
# (Renamed from a throwaway binding: convert_gin_to_config() reads this
# mapping by the name GIN_TO_CONFIG_MAPPING.)
GIN_TO_CONFIG_MAPPING = {
    'NUM_ENCODER_LAYERS': 'num_layers',
    'NUM_DECODER_LAYERS': 'num_decoder_layers',
    'NUM_HEADS': 'num_heads',
    'HEAD_DIM': 'd_kv',
    'EMBED_DIM': 'd_model',
    'MLP_DIM': 'd_ff',
    'NUM_SELECTED_EXPERTS': 'num_selected_experts',
    'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
    'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
    'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a google-style gin config file into a ``SwitchTransformersConfig``.

    Numeric ``NAME = value`` lines are mapped through GIN_TO_CONFIG_MAPPING;
    the activation tuple line sets ``feed_forward_proj``; ``num_experts`` is
    injected explicitly.
    """
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, 'r') as f:
        raw_gin = f.read()

    regex_match = re.findall(r'(.*) = ([0-9.]*)', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            # A dot marks a float value, otherwise parse as int.
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '.' in value else int(value)

    # Activation is stored as a one-element tuple literal, e.g. ('gelu',).
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a t5x SwitchTransformers checkpoint, remap its keys, and save a PyTorch model.

    If ``gin_file`` is given the config is parsed from it; otherwise it is
    loaded from ``config_file``.
    """
    # Initialise PyTorch model
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    # NOTE(review): the module is imported as `from tax import checkpoints` at
    # the top of this file — this looks like a mangled `t5x` import; confirm.
    flax_params = checkpoints.load_tax_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params, sep='/')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='/')

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
# CLI entry point. Fixes: the ArgumentParser and parse_args() result were
# bound to throwaway names while referenced as `parser`/`args`, and the
# attribute for `--switch_t5x_checkpoint_path` was misspelled at the call.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
            ' model architecture. If not provided, a `gin_file` has to be provided.'
        ),
    )
    parser.add_argument(
        '--gin_file',
        default=None,
        type=str,
        required=False,
        help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
    )
    parser.add_argument(
        '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
    )
    parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
| 365 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# Module logger (renamed from a throwaway binding to the conventional name).
logger = get_logger(__name__)
# Shared docstring injected into every processor's __call__ via
# @add_start_docstrings (renamed from a throwaway binding to the name the
# decorators reference in the canonical transformers source).
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.
            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.
    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors applied during generation.

    Fix: the original `__call__` declared the same parameter name twice
    (a SyntaxError); canonical parameter names are restored.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during multinomial sampling.

    Fix: the original `__call__` declared the same parameter name twice
    (a SyntaxError); canonical parameter names are restored.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class FlaxLogitsProcessorList(list):
    """A ``list`` of processors/warpers applied in order to the scores.

    Processors whose `__call__` takes extra keyword parameters (more than the
    standard three positional inputs) receive them from ``kwargs``; a missing
    required kwarg raises a ValueError.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Validate that every extra parameter the processor declares is supplied.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys())} for '
                        f'{processor.__class__} are passed to the logits processor.'
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper that divides scores by a strictly positive ``temperature``."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Nucleus (top-p) filtering: keep the smallest set of tokens whose
    cumulative probability exceeds ``top_p``; everything else is set to
    ``filter_value``. At least ``min_tokens_to_keep`` tokens are preserved."""

    def __init__(self, top_p: float, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Sort all scores descending (top_k over the full vocab).
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Undo the sort so scores line up with the original vocab order.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Top-k filtering: keep the ``top_k`` highest-probability tokens and set
    all other scores to ``filter_value``."""

    def __init__(self, top_k: int, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        # Start from an all-filtered flat score vector, then scatter the
        # surviving top-k scores back in at their flattened positions.
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces ``bos_token_id`` to be the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))
        # apply_penalty is 1 only at cur_len == 1 (XLA-friendly branchless flag).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Forces ``eos_token_id`` to be the last generated token when ``max_length`` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float('inf'))
        # apply_penalty is 1 only when generating the final position.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses EOS until at least ``min_length`` tokens have been generated."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Suppresses ``begin_suppress_tokens`` while generation is still at ``begin_index``."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        # apply_penalty is 1 only when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Unconditionally sets the scores of ``suppress_tokens`` to ``-inf``."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float('inf'))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Forces specific tokens at specific generation indices, given as a
    ``{index: token_id}`` map."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            # Build a score matrix that is -inf everywhere except 0 at the forced token.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf')
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, do nothing.
            lambda: scores,
            # Otherwise, force the token only when a valid (non-negative) id is stored.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Whisper timestamp rules: timestamp tokens must appear in pairs, the
    initial timestamp is bounded, and when the total timestamp probability
    exceeds every text token, only timestamps may be sampled."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # Timestamp ids start right after the <|notimestamps|> token.
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, 'max_initial_timestamp_index'):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    # Two timestamps in a row -> no more timestamps; one -> no text/EOS before it.
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float('inf')),
                    scores_k.at[: self.eos_token_id].set(-float('inf')),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float('inf')),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float('inf')),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 109 | 0 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def lowerCamelCase(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
):
    """Dijkstra shortest path on a binary grid (1 = passable, 0 = blocked).

    Every step costs 1, optionally including the four diagonals. Returns
    ``(distance, path)`` where ``path`` runs from ``source`` to
    ``destination`` inclusive, or ``(np.inf, [])`` when unreachable.
    Fix: all four parameters were declared with the same name (a
    SyntaxError) while the body referenced the real names restored here.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            # Walk predecessors back to the source to rebuild the path.
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Relax only passable cells that get a strictly shorter distance.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
# Run the module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 27 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the example script. The original bound both values
# to the same throwaway name (the second shadowed the first).
# NOTE(review): names presumed from the upstream accelerate example — confirm.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x: int) -> int:
    """Convert bytes to whole mebibytes (truncating). Renamed to match the
    call sites (`bamb(...)`) and the body's reference to ``x``."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that tracks CUDA memory deltas across its body.

    On exit it exposes ``begin``/``end``/``peak`` (raw bytes) and
    ``used``/``peaked`` (MiB deltas relative to entry), which the training
    loop prints. Fix: the original stored every measurement in a throwaway
    local instead of on ``self``, and the class name did not match its use
    site (``with TorchTracemalloc() as tracemalloc``).
    """

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    """Build train/eval DataLoaders over truncated GLUE/MRPC splits.

    Tokenizes sentence pairs with ``model_name``'s tokenizer and pads per
    batch (or to a fixed length on TPU). Returns
    ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=['idx', 'sentence1', 'sentence2'],
        load_from_cache_file=False,
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train a sequence-classification model while tracking peak GPU memory.

    ``config`` carries lr / num_epochs / seed / batch_size; ``args`` carries
    the CLI options. Per-epoch peak memory is recorded and optionally checked
    against ``args.peak_memory_upper_bound``, then dumped to a JSON file in
    ``args.output_dir`` by the main process.
    """
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a real AdamW unless DeepSpeed supplies its own.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler: real schedule unless DeepSpeed configures one.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    """Parse CLI options and launch the peak-memory tracking training run.

    Renamed to ``main`` to match the ``if __name__ == "__main__"`` call;
    the parser and parsed args were previously bound to throwaway names.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--peak_memory_upper_bound',
        type=float,
        default=None,
        help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.',
    )
    parser.add_argument(
        '--n_train',
        type=int,
        default=320,
        help='Number of training examples to use.',
    )
    parser.add_argument(
        '--n_val',
        type=int,
        default=160,
        help='Number of validation examples to use.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=1,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Script entry point.
if __name__ == "__main__":
    main()
| 27 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger; the tokenizer's __init__ logs through this name.
logger = logging.get_logger(__name__)
# File names the tokenizer expects inside a saved directory
# (read via the `VOCAB_FILES_NAMES` class attribute below).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}
# Hub URLs for each vocab-related file of the known pretrained checkpoint
# (read via the `PRETRAINED_VOCAB_FILES_MAP` class attribute below).
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}
# End-of-word marker appended by the BPE merges (read in `bpe()` below).
BPE_TOKEN_MERGES = "</w>"
# Continuation marker used when re-splitting BPE output into sub-tokens.
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (e.g. a string or a tuple of
    variable-length string symbols). Renamed to match its call site in
    ``bpe()``. Note: an empty ``word`` raises IndexError (unchanged — callers
    always pass at least one symbol).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# (read via the `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` class attribute below).
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class _lowercase(PreTrainedTokenizer):
    """
    Speech2Text2 tokenizer backed by a JSON vocabulary and an optional BPE
    ``merges.txt``.  Without a merges file the tokenizer can only decode; with
    one it can also encode text via byte-pair encoding, using "@@ "
    continuation markers inside words and "</w>" as the implicit end-of-word
    marker.

    NOTE(review): the original block was syntactically invalid (all
    ``__init__`` parameters shared one name, all methods shared one name);
    restored to the standard `PreTrainedTokenizer` interface.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        """
        Args:
            vocab_file: path to the JSON token->id vocabulary.
            merges_file: optional path to the BPE merges file; when ``None``
                the tokenizer is decode-only.
            do_lower_case: lowercase input text before tokenizing.
        """
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            # Lower rank == higher-priority merge.
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        # Size of the base vocabulary (without added tokens).
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single whitespace-delimited token and
        return the space-separated sub-token string ("@@ " continuations)."""
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Greedily pick the highest-priority (lowest-rank) adjacent pair.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* on whitespace and BPE-encode each piece."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab (unknown -> unk id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join output tokens into a single string, re-attaching the "@@ "
        continuation pieces."""
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` (and ``merges.txt`` when merges are loaded)
        into *save_directory*; returns the written file paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        # Decode-only tokenizers have no merges to persist.
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 356 |
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term, power):
    """Build the P-Series "1, 1/2^p, 1/3^p, ..., 1/n^p" as strings.

    Args:
        nth_term: the last term n of the series (an empty string yields [""]).
        power: the exponent p applied to each denominator.

    Returns:
        list of the series terms formatted as strings.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series(-5, 1)
    []
    >>> p_series("", 1000)
    ['']
    """
    # NOTE: the original signature repeated one parameter name (SyntaxError)
    # and the body referenced names that were never assigned; restored.
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        # The very first term is always "1"; later terms are 1 / (k ** power).
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


# Backward-compatible alias for the old (placeholder) name.
SCREAMING_SNAKE_CASE_ = p_series
# CLI entry point: run the doctests, then read n and p from the user and
# print the series.  (Previously both input() results were bound to the same
# placeholder `_A` and the undefined names nth_term/power were used.)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 129 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class __A(TaskTemplate):
    """Task template for automatic speech recognition datasets: one input
    audio column mapped to one transcription (string) column.

    NOTE(review): restored field/method names — the original bound all five
    attributes and both methods to single placeholder names, so they shadowed
    each other and the methods' ``self.audio_column`` reads always failed.
    """

    # `task` is serialized even though it always holds the default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the
        dataset's own `Audio` feature for the audio column.

        Raises:
            ValueError: if the audio column is missing or not an `Audio` type.
        """
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__ on the copy.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Maps dataset column names to the canonical task column names.
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 71 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    """Adjacency-list directed graph; each edge is stored as ``[weight, destination]``.

    NOTE(review): renamed from the placeholder ``a__`` — the second class in
    this module reused that name and shadowed this one, making it unreachable.
    All locals were also bound to a single placeholder; restored.
    """

    def __init__(self):
        # node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add edge u->v with optional weight w; duplicate edges are ignored."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        # make sure the destination also exists as a node
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove edge u->v if present (no-op otherwise)."""
        if self.graph.get(u):
            # iterate over a copy: removing while iterating skips elements
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from *s* (default: the first node).  Stops early when
        destination *d* is reached; returns the list of visited nodes."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with *c* random nodes (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from *s* (default: the first node)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing at *u*."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving *u*."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based ordering starting at *s*; nodes are emitted as their
        subtree finishes (i.e. reverse topological order for a DAG)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the nodes that participate in (anticipate) a cycle,
        using a DFS with back-edge detection."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: collect the stack segment up to it
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True if a cycle is reachable from the first node."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class a__:
    """Adjacency-list *undirected* graph; every edge is mirrored into both
    endpoint lists as ``[weight, neighbour]``.

    NOTE(review): the placeholder class name is kept for backward
    compatibility (it is the binding external code would see); locals and
    method names, which previously all collided on single placeholders,
    are restored.
    """

    def __init__(self):
        # node -> list of [weight, neighbour] pairs
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add undirected edge u-v (duplicates ignored)."""
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove undirected edge u-v if present."""
        if self.graph.get(u):
            for edge in list(self.graph[u]):
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in list(self.graph[v]):
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from *s* (default: the first node).  Stops early when
        destination *d* is reached; returns the list of visited nodes."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with *c* random nodes (random count if c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from *s* (default: the first node)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to *u*."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the nodes that participate in (anticipate) a cycle,
        using a DFS with back-edge detection."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        # back edge found: collect the stack segment up to it
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """True if a cycle is reachable from the first node."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds taken by dfs(s, e)."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds taken by bfs(s)."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 68 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
# Test fixtures/constants.  (Previously all bound to one placeholder `a__`,
# so each assignment clobbered the previous and the test class below read
# undefined names such as ORG_NAME and FRAMEWORK.)
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

# Minimal tokenizer_config.json content written next to the mock vocab.
mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

# Tensor framework used for `return_tensors` in the batching tests.
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class a_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MarianTokenizer (sentencepiece-backed).

    NOTE(review): every test method previously shared the name
    ``__lowerCAmelCase`` (so only one survived class creation) and the base
    class placeholder resolved to a string constant; restored to the standard
    TokenizerTesterMixin layout.
    """

    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 3_8848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # 5_8100 fills the positions that are masked out (0) in attention_mask
        # below — presumably the model's padding id; values unchanged, padding
        # runs expressed via list repetition instead of the original literal.
        expected_encoding = {
            "input_ids": [
                [4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0],  # noqa: E501
                [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0] + [5_8100] * 71,  # noqa: E501
                [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0] + [5_8100] * 90,
            ],
            "attention_mask": [
                [1] * 102,
                [1] * 31 + [0] * 71,
                [1] * 12 + [0] * 90,
            ],
        }
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )

    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
| 19 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Module-level constants.  (Previously every binding below targeted the same
# placeholder `a__`; restored to the identifiers the functions below use.)
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1_024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    Build a map from TensorFlow checkpoint variable names to the matching
    PyTorch parameters of *model*.

    NOTE(review): the original signature repeated one parameter name
    (SyntaxError) and the dict-key assignments had lost their keys; restored
    using the standard MobileNetV1 TF checkpoint naming scheme.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    # Stem convolution + batch norm.
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        # TF numbers separable blocks 1..13; PyTorch stores depthwise and
        # pointwise halves as consecutive layers (2*i and 2*i + 1).
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        # Classifier head (1x1 conv in TF, linear layer in PyTorch).
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load a TensorFlow MobileNetV1 checkpoint into the PyTorch *model*.

    NOTE(review): the original signature repeated one parameter name
    (SyntaxError) and every local was bound to one placeholder; restored.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            # TF depthwise layout (H, W, in, mult) -> PyTorch (in, mult, H, W)
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the consumed variable plus its optimizer/EMA slot variables so
        # the leftover report below only lists genuinely unmapped weights.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Pad ``features`` the way TensorFlow's "SAME" padding would for ``conv_layer``.

    TensorFlow computes padding dynamically from the input size, stride, and
    kernel size (and pads asymmetrically when the total is odd, with the extra
    pixel on the right/bottom), whereas PyTorch convolutions use a fixed,
    symmetric padding. This helper reproduces the TF behavior.

    Args:
        features: Input tensor whose last two dimensions are (height, width).
        conv_layer: A ``nn.Conv2d`` (or object exposing ``stride`` and
            ``kernel_size`` 2-tuples) the features will be fed into.

    Returns:
        The zero-padded features tensor.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # Split each total asymmetrically: the right/bottom side gets the extra pixel.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Convolution block used throughout MobileNetV1: conv -> optional batch norm -> optional activation."""

    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF-style padding, padding is computed dynamically in forward();
        # otherwise use the usual fixed "half kernel" padding.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # use_activation may be True (fall back to config.hidden_act) or the
            # name of an activation; ACTaFN maps names to activation functions.
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize the weights of a single submodule (called by ``post_init``)."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Truncated-normal-style init for projection/conv weights, zero bias.
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            # Identity affine transform for batch norm.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

# The original file bound BOTH docstrings to the same name `a__`, so the second
# assignment silently clobbered the first. Keep `a__` pointing at the inputs
# docstring (the original final binding) for backward compatibility with any
# remaining references to the old name.
a__ = MOBILENET_V1_INPUTS_DOCSTRING
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.', a__, )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        """Build the MobileNetV1 backbone: a stem conv followed by 13 depthwise-separable blocks.

        Args:
            config: Model configuration (depth_multiplier, min_depth, num_channels, ...).
            add_pooling_layer: If True, append a global average pooler producing a pooled output.
        """
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            # Channel count doubles at the stem and at every stride-2 block,
            # scaled by the depth multiplier and floored at min_depth.
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution (groups == channels).
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                ))
            # Pointwise 1x1 convolution mixing channels.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                ))

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            # Global average pool to (batch, channels, 1, 1), then flatten to (batch, channels).
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', a__, )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from num_labels and the label dtype,
            # then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 19 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 298 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
| 298 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
snake_case : Any = logging.get_logger(__name__)
snake_case : List[Any] = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class _snake_case ( lowerCamelCase_ ):
SCREAMING_SNAKE_CASE__ = """dpt"""
def __init__( self , _lowerCamelCase=768 , _lowerCamelCase=12 , _lowerCamelCase=12 , _lowerCamelCase=3072 , _lowerCamelCase="gelu" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.02 , _lowerCamelCase=1e-12 , _lowerCamelCase=384 , _lowerCamelCase=16 , _lowerCamelCase=3 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=[2, 5, 8, 11] , _lowerCamelCase="project" , _lowerCamelCase=[4, 2, 1, 0.5] , _lowerCamelCase=[96, 192, 384, 768] , _lowerCamelCase=256 , _lowerCamelCase=-1 , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=0.4 , _lowerCamelCase=255 , _lowerCamelCase=0.1 , _lowerCamelCase=[1, 1024, 24, 24] , _lowerCamelCase=[0, 1] , _lowerCamelCase=None , **_lowerCamelCase , ):
super().__init__(**_UpperCAmelCase )
a :str = hidden_size
a :Optional[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
a :Union[str, Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
a :Tuple = BitConfig(**_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
a :Any = BitConfig(**_UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a :int = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
a :int = backbone_featmap_shape
a :Optional[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
a :Union[str, Any] = None
a :List[str] = None
a :Tuple = []
a :Optional[int] = num_hidden_layers
a :List[Any] = num_attention_heads
a :List[str] = intermediate_size
a :Optional[Any] = hidden_act
a :int = hidden_dropout_prob
a :int = attention_probs_dropout_prob
a :Any = initializer_range
a :Union[str, Any] = layer_norm_eps
a :List[Any] = image_size
a :Any = patch_size
a :str = num_channels
a :Tuple = qkv_bias
a :int = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
a :Tuple = readout_type
a :List[str] = reassemble_factors
a :int = neck_hidden_sizes
a :Union[str, Any] = fusion_hidden_size
a :Optional[Any] = head_in_index
a :Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
a :Optional[int] = use_auxiliary_head
a :Union[str, Any] = auxiliary_loss_weight
a :Tuple = semantic_loss_ignore_index
a :Any = semantic_classifier_dropout
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
a :List[Any] = self.backbone_config.to_dict()
a :Dict = self.__class__.model_type
return output
| 367 |
def __lowerCamelCase ( UpperCAmelCase_ : int ):
"""simple docstring"""
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError('''Input value must be an \'int\' type''' )
a :Optional[int] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 281 | 0 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
UpperCamelCase__ = threading.Lock()
UpperCamelCase__ = None
UpperCamelCase__ = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCamelCase__ = logging.WARNING
UpperCamelCase__ = True
def a__ ( ) -> str:
UpperCAmelCase__ : List[Any] = os.getenv('''TRANSFORMERS_VERBOSITY''' , SCREAMING_SNAKE_CASE__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def a__ ( ) -> Tuple:
return __name__.split('''.''' )[0]
def a__ ( ) -> int:
return logging.getLogger(_get_library_name() )
def a__ ( ) -> Tuple:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
UpperCAmelCase__ : List[str] = logging.StreamHandler() # Set sys.stderr as stream.
UpperCAmelCase__ : List[str] = sys.stderr.flush
# Apply our default configuration to the library root logger.
UpperCAmelCase__ : Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
UpperCAmelCase__ : str = False
def a__ ( ) -> Dict:
global _default_handler
with _lock:
if not _default_handler:
return
UpperCAmelCase__ : List[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
UpperCAmelCase__ : Optional[Any] = None
def a__ ( ) -> Dict:
return log_levels
def a__ ( lowerCAmelCase__ = None ) -> List[str]:
if name is None:
UpperCAmelCase__ : Any = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Union[str, Any]:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def a__ ( lowerCAmelCase__ ) -> Any:
_configure_library_root_logger()
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Tuple:
return set_verbosity(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Dict:
return set_verbosity(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Optional[Any]:
return set_verbosity(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> str:
return set_verbosity(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Dict:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def a__ ( ) -> Dict:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def a__ ( lowerCAmelCase__ ) -> Dict:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(SCREAMING_SNAKE_CASE__ )
def a__ ( lowerCAmelCase__ ) -> Tuple:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Optional[Any]:
_configure_library_root_logger()
UpperCAmelCase__ : List[str] = False
def a__ ( ) -> int:
_configure_library_root_logger()
UpperCAmelCase__ : List[Any] = True
def a__ ( ) -> Union[str, Any]:
UpperCAmelCase__ : Dict = _get_library_root_logger().handlers
for handler in handlers:
UpperCAmelCase__ : Union[str, Any] = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(SCREAMING_SNAKE_CASE__ )
def a__ ( ) -> Tuple:
UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(SCREAMING_SNAKE_CASE__ )
def a__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : int = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , SCREAMING_SNAKE_CASE__ )
if no_advisory_warnings:
return
self.warning(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = warning_advice
@functools.lru_cache(SCREAMING_SNAKE_CASE__ )
def a__ ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
self.warning(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = warning_once
class lowerCamelCase_ :
def __init__( self : Union[str, Any] , *_A : str , **_A : Tuple ): # pylint: disable=unused-argument
'''simple docstring'''
UpperCAmelCase__ : List[str] = args[0] if args else None
def __iter__( self : Optional[Any] ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : Any , _A : Union[str, Any] ):
'''simple docstring'''
def empty_fn(*_A : List[Any] , **_A : str ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Union[str, Any] ):
'''simple docstring'''
return self
def __exit__( self : List[Any] , _A : int , _A : Optional[int] , _A : Dict ):
'''simple docstring'''
return
class lowerCamelCase_ :
def __call__( self : str , *_A : str , **_A : Any ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*__snake_case , **__snake_case )
else:
return EmptyTqdm(*__snake_case , **__snake_case )
def lowercase_ ( self : Tuple , *_A : Any , **_A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__snake_case , **__snake_case )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCamelCase__ = _tqdm_cls()
def a__ ( ) -> Union[str, Any]:
global _tqdm_active
return bool(_tqdm_active )
def a__ ( ) -> Tuple:
global _tqdm_active
UpperCAmelCase__ : Optional[int] = True
hf_hub_utils.enable_progress_bars()
def a__ ( ) -> Dict:
global _tqdm_active
UpperCAmelCase__ : Any = False
hf_hub_utils.disable_progress_bars()
| 181 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=18 , __snake_case=30 , __snake_case=400 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=None , ):
_SCREAMING_SNAKE_CASE : Any = size if size is not None else {"""shortest_edge""": 20}
_SCREAMING_SNAKE_CASE : int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_SCREAMING_SNAKE_CASE : Dict = parent
_SCREAMING_SNAKE_CASE : Any = batch_size
_SCREAMING_SNAKE_CASE : Any = num_channels
_SCREAMING_SNAKE_CASE : Dict = image_size
_SCREAMING_SNAKE_CASE : Any = min_resolution
_SCREAMING_SNAKE_CASE : Any = max_resolution
_SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize
_SCREAMING_SNAKE_CASE : Tuple = size
_SCREAMING_SNAKE_CASE : Optional[int] = do_center_crop
_SCREAMING_SNAKE_CASE : str = crop_size
def UpperCAmelCase_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase__ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Union[str, Any] = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """do_center_crop""" ) )
self.assertTrue(hasattr(__snake_case , """crop_size""" ) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
# Initialize image_processing
_SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE : int = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase_ ( self ):
    """Same smoke test as the PIL variant, but feeding numpy arrays."""
    # Initialize image_processing (local names restored from the reads below).
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    # NOTE(review): `numpify=True` / `equal_resolution=False` replace undefined
    # `__snake_case` placeholders — confirm against the tester class.
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
    for image in image_inputs:
        self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ) , )
def UpperCAmelCase_ ( self ):
    """Same smoke test as the PIL variant, but feeding PyTorch tensors."""
    # Initialize image_processing (local names restored from the reads below).
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    # NOTE(review): `torchify=True` / `equal_resolution=False` replace undefined
    # `__snake_case` placeholders — confirm against the tester class.
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
    for image in image_inputs:
        self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ) , )
    # Test batched
    encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ) , )
| 200 | 0 |
from typing import List
import numpy as np
def lowerCAmelCase_ ( gen_kwargs : dict ) -> int:
    """Return the number of shards implied by the list-valued entries of *gen_kwargs*.

    Only lists are treated as shardable data sources (tuples are opaque).
    All lists must share one length; a dict with no lists counts as 1 shard.

    Raises:
        RuntimeError: if lists of different lengths coexist.
    """
    # Fix: the original iterated the undefined name `gen_kwargs` via a mangled
    # parameter, bound the result to a throwaway name, and returned the input
    # dict instead of the maximum list length.
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    # At least one shard even when no list arguments are present.
    return max(1 , max_length )
def lowerCAmelCase_ ( num_shards : int , max_num_jobs : int ) -> List[range]:
    """Distribute *num_shards* contiguous shard indices over up to *max_num_jobs* groups.

    Earlier groups absorb the remainder, so group sizes differ by at most one;
    groups that would be empty are dropped.

    Fixes the original's duplicated parameter names (a SyntaxError) and its
    throwaway local bindings that were read back under their intended names.
    """
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        # Continue where the previous group's range stopped.
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
def lowerCAmelCase_ ( gen_kwargs : dict , max_num_jobs : int ) -> List[dict]:
    """Split *gen_kwargs* into up to *max_num_jobs* dicts, sharding its list values.

    Non-list values are copied to every group unchanged.  Fixes the original's
    duplicated parameter names (a SyntaxError) and misbound locals.

    NOTE(review): `_number_of_shards_in_gen_kwargs` and `_distribute_shards` are
    the upstream names of the two helpers defined above (both obfuscated to
    `lowerCAmelCase_` in this file) — these calls resolve only once those names
    are restored.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def lowerCAmelCase_ ( gen_kwargs_list : List[dict] ) -> dict:
    """Merge a list of sharded gen_kwargs dicts back into one dict.

    List values are concatenated in order; non-list values are taken from the
    first dict.  Fixes the original's `isinstance(..., <the list itself>)`
    (a runtime TypeError) — the intended check is `isinstance(..., list)` —
    and the mangled parameter name that the body read as `gen_kwargs_list`.
    """
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def lowerCAmelCase_ ( rng : np.random.Generator , gen_kwargs : dict ) -> dict:
    """Return a copy of *gen_kwargs* with every list value shuffled by *rng*.

    Lists of equal length share one permutation, so parallel data-source lists
    stay aligned.  Fixes the original's duplicated parameter names (a
    SyntaxError) and its misbound comprehension/loop variables.
    """
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
| 41 |
def lowerCAmelCase_ ( point_a , point_b ):
    """Return the squared Euclidean distance between two 2-D points.

    Fixes the original's duplicated parameter names (a SyntaxError) and its
    body, which subtracted each coordinate from itself and always returned 0.
    """
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def lowerCAmelCase_ ( array , column=0 ):
    """Return *array* sorted (stably) by the tuple element at index *column*.

    Fixes the original's duplicated parameter names and its lambda, which
    ignored its own argument and read two undefined names.
    """
    return sorted(array , key=lambda point: point[column] )
def lowerCAmelCase_ ( points , points_counts , min_dis=float("inf" ) ):
    """Brute-force O(n^2) minimum squared distance over the first *points_counts* points.

    Fixes the original's duplicated parameter names and misbound locals.
    NOTE(review): `euclidean_distance_sqr` is the upstream name of the helper
    defined above (obfuscated to `lowerCAmelCase_` here) — restore it for this
    call to resolve.
    """
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowerCAmelCase_ ( points , points_counts , min_dis=float("inf" ) ):
    """Minimum squared distance inside a vertical strip.

    Each point need only be compared with its ~6 predecessors in y-order,
    the classic closest-pair strip argument.  Fixes the original's duplicated
    parameter names and its inner range, which ended at the mangled `min_dis`
    parameter instead of `i`.
    NOTE(review): depends on `euclidean_distance_sqr` being restored (see above).
    """
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def lowerCAmelCase_ ( points_sorted_on_x , points_sorted_on_y , points_counts ):
    """Divide-and-conquer closest-pair recursion (squared distances).

    Parameters restored from the names the body reads; the mangled placeholder
    arguments are filled in per the standard algorithm.
    NOTE(review): `dis_between_closest_pair`, `closest_pair_of_points_sqr` and
    `dis_between_closest_in_strip` are the upstream names of the helpers in
    this file (all obfuscated to `lowerCAmelCase_`) — restore them for these
    calls to resolve.
    """
    # Small inputs: brute force.
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion on the two halves
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    # Collect points within closest_pair_dis of the dividing vertical line.
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def lowerCAmelCase_ ( points , points_counts ):
    """Closest-pair distance (not squared): sort by x and y, then recurse.

    Fixes the misbound locals (the sorted lists were assigned to a throwaway
    name but read back as `points_sorted_on_x` / `points_sorted_on_y`).
    NOTE(review): `column_based_sort` and `closest_pair_of_points_sqr` are the
    upstream names of helpers in this file — restore them for the calls below.
    """
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    # NOTE(review): `closest_pair_of_points` and `points` are not defined under
    # these names in this file (the defs above are all obfuscated to
    # `lowerCAmelCase_`, and the list below is bound to `snake_case`), so this
    # demo would raise NameError if run — confirm the intended identifiers.
    snake_case : List[str] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 41 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    r"""
    Processor that wraps a Flava image processor and a BERT tokenizer into a
    single callable mapping text and/or images to model-ready encodings.

    Fixes applied: the base class was the undefined `__UpperCAmelCase`
    (restored to the imported `ProcessorMixin`); the three class attributes
    all shadowed one name, leaving the `ProcessorMixin` contract unmet; the
    `__call__` signature repeated one parameter name (a SyntaxError); locals
    and the shadowed `_UpperCAmelCase` method names are restored per the
    upstream FlavaProcessor.
    """

    # ProcessorMixin contract: these exact attribute names are required.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Build the processor; the deprecated `feature_extractor` kwarg is honoured."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__(
        self ,
        images: Optional[ImageInput] = None ,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None ,
        add_special_tokens: bool = True ,
        padding: Union[bool, str, PaddingStrategy] = False ,
        truncation: Union[bool, str, TruncationStrategy] = False ,
        max_length: Optional[int] = None ,
        stride: int = 0 ,
        pad_to_multiple_of: Optional[int] = None ,
        return_image_mask: Optional[bool] = None ,
        return_codebook_pixels: Optional[bool] = None ,
        return_token_type_ids: Optional[bool] = None ,
        return_attention_mask: Optional[bool] = None ,
        return_overflowing_tokens: bool = False ,
        return_special_tokens_mask: bool = False ,
        return_offsets_mapping: bool = False ,
        return_length: bool = False ,
        verbose: bool = True ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
        **kwargs , ):
        """Tokenize `text` and/or preprocess `images`; merge both when given.

        Raises ValueError when neither text nor images is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Order-preserving, de-duplicated union of both sub-processors' input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 136 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
# Hub CI credentials and endpoints used by the fixtures below.  The original
# bound every value to the same shadowed name (`UpperCAmelCase`) and then read
# `CI_HUB_ENDPOINT`, which was therefore undefined at import time; the real
# names (the ones the fixtures and repo helpers below reference) are restored.
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
# Dummy token for the hub-ci instance only — not a real credential.
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
# NOTE(review): "{filename}" restored — the source had a metadata placeholder
# "(unknown)" spliced into this URL template; confirm against upstream conftest.
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Dict:
    """Patch huggingface_hub's file-download URL template for the CI hub."""
    # NOTE(review): pytest injects fixtures by parameter name and the body reads
    # `monkeypatch`, so this parameter was presumably `monkeypatch` before
    # obfuscation; as written the call raises NameError.  The patched-in value
    # is the parameter itself rather than a URL-template constant — confirm
    # against the upstream conftest.
    monkeypatch.setattr(
        """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
    """Point the `datasets` config at the CI hub endpoint and datasets URL."""
    # NOTE(review): same obfuscation problem as the fixture above — `monkeypatch`
    # is read but not a parameter, and the patched values should presumably be
    # the CI endpoint constants rather than the parameter itself.
    monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __lowerCAmelCase )
    monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
    """Redirect HfFolder's token path so tests never touch the real user token."""
    # NOTE(review): `monkeypatch` is read but the injected parameter is named
    # `__lowerCAmelCase` — same naming defect as the fixtures above.
    monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __lowerCAmelCase )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase_a ) -> Optional[Any]:
    """Install the given token for the duration of a test, then delete it.

    The original signature repeated the parameter name `__lowerCAmelCase`
    (a SyntaxError); the second parameter is renamed — it is only consumed
    for fixture ordering, not read in the body.
    """
    HfFolder.save_token(__lowerCAmelCase )
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE () -> Dict:
    """Session-scoped HfApi client for the CI hub."""
    # NOTE(review): `__lowerCAmelCase` is undefined here — the endpoint argument
    # was presumably the CI hub endpoint constant; as written this raises
    # NameError when the fixture is first used.
    return HfApi(endpoint=__lowerCAmelCase )
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> List[str]:
    """Save a hub token for the whole session, restoring the previous one at teardown.

    Fixes two obfuscation bugs: the pre-existing token was bound to a throwaway
    name but read back as `previous_token`, and teardown re-saved the *new*
    token instead of restoring the previous one.
    """
    previous_token = HfFolder.get_token()
    HfFolder.save_token(__lowerCAmelCase )
    # NOTE(review): `CI_HUB_USER_TOKEN` is referenced here as in the original;
    # it resolves once the module's constant block is un-shadowed.
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def _SCREAMING_SNAKE_CASE (hf_api ) -> Optional[Any]:
    """Return a callable that deletes a dataset repo through the given HfApi client.

    The original took an obfuscated, unused parameter while the closure read
    `hf_api` (NameError); the parameter is restored.  The inner call also
    passed its repo-id argument as the token.
    """
    def _cleanup_repo(repo_id ):
        # NOTE(review): token presumably the CI user token constant — confirm.
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
def _SCREAMING_SNAKE_CASE (cleanup_repo ) -> Optional[Any]:
    """Context-manager factory: yield a repo id, deleting the repo afterwards.

    Names restored from the body itself: the inner parameter is yielded as
    `repo_id` and the outer parameter is called as `cleanup_repo` — both were
    obfuscated to undefined/throwaway names.
    """
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            # Always clean up, even if the test body raised.
            cleanup_repo(repo_id )
    return _temporary_repo
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (hf_api , hf_token , text_file ) -> List[Any]:
    """Create a private txt-data dataset repo on the CI hub; yield its id; delete it after.

    The original repeated one obfuscated parameter name three times (a
    SyntaxError) and bound the repo name/id to throwaway names.  Parameter
    names follow the upstream conftest — confirm fixture wiring.
    """
    repo_name = f"repo_txt_data-{int(time.time() * 10E3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="""data/text_data.txt""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE (hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> Any:
    """Expose the session txt-data repo with the CI hub config fixtures applied.

    The original repeated one obfuscated parameter name three times (a
    SyntaxError) and returned a name none of them bound; the first parameter
    is restored to the fixture the body returns — confirm fixture wiring.
    """
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (hf_api , hf_token , zip_csv_with_dir_path ) -> Union[str, Any]:
    """Create a private zipped-txt dataset repo on the CI hub; yield its id; delete after.

    Same fixes as the txt-data fixture above: de-duplicated parameter names
    (the original was a SyntaxError) and restored local bindings — confirm
    the third fixture's name against the upstream conftest.
    """
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10E3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE (hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> Optional[Any]:
    """Expose the session zipped-txt repo with the CI hub config fixtures applied.

    De-duplicates the original's repeated parameter name (a SyntaxError);
    first parameter restored to the fixture the body returns — confirm wiring.
    """
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def _SCREAMING_SNAKE_CASE (hf_api , hf_token , zip_image_path ) -> str:
    """Create a private zipped-image dataset repo on the CI hub; yield its id; delete after.

    Same fixes as the sibling session fixtures: de-duplicated parameter names
    (the original was a SyntaxError) and restored local bindings — confirm
    the third fixture's name against the upstream conftest.
    """
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10E3 )}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def _SCREAMING_SNAKE_CASE (hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> Dict:
    """Expose the session zipped-image repo with the CI hub config fixtures applied.

    De-duplicates the original's repeated parameter name (a SyntaxError);
    first parameter restored to the fixture the body returns — confirm wiring.
    """
    return hf_private_dataset_repo_zipped_img_data_
| 136 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module: make the examples' shared lightning_base
# importable before the local import below.
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa

# Module-level logger for this training script.
__snake_case = logging.getLogger(__name__)
class _lowerCAmelCase ( snake_case_ ):
    """PyTorch-Lightning module for seq2seq summarization fine-tuning.

    NOTE(review): this file's identifiers were mechanically mangled — the base
    class `snake_case_` is an obfuscated reference; nearly every local is bound
    to the throwaway name `snake_case` while later statements read the
    originally intended identifiers (e.g. `metrics_save_path`,
    `n_observations_per_split`, `readable_batch`, `loss_tensors`); every
    method is named `lowerCamelCase` (so later defs shadow earlier ones); and
    several signatures repeat the parameter name `UpperCamelCase__`, which is
    a SyntaxError.  The code below is left byte-identical; only documentation
    was added.  Restore the real identifiers before running.
    """

    __UpperCAmelCase : Optional[int] = '''summarization'''
    __UpperCAmelCase : Any = ['''loss''']
    __UpperCAmelCase : Any = ROUGE_KEYS
    __UpperCAmelCase : Tuple = '''rouge2'''

    def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
        """Configure datasets, per-split sizes/target lengths, freezing, and decoding defaults from hparams."""
        if hparams.sortish_sampler and hparams.gpus > 1:
            snake_case : List[Any] = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
        super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
        use_task_specific_params(self.model , "summarization" )
        save_git_info(self.hparams.output_dir )
        snake_case : Union[str, Any] = Path(self.output_dir ) / "metrics.json"
        snake_case : Tuple = Path(self.output_dir ) / "hparams.pkl"
        pickle_save(self.hparams , self.hparams_save_path )
        snake_case : Any = 0
        snake_case : Tuple = defaultdict(UpperCamelCase__ )
        snake_case : Union[str, Any] = self.config.model_type
        snake_case : Dict = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        snake_case : dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        snake_case : List[Any] = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        # Negative counts mean "use all" (mapped to None).
        snake_case : int = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        snake_case : List[str] = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        snake_case : Union[str, Any] = get_git_info()["repo_sha"]
        snake_case : Any = hparams.num_workers
        snake_case : List[str] = None  # default to config
        # mBART-style tokenizers need an explicit language decoder start token.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
            snake_case : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            snake_case : Any = self.decoder_start_token_id
        snake_case : Dict = (
            SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
        )
        snake_case : Optional[Any] = False
        snake_case : List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            snake_case : List[Any] = self.hparams.eval_max_gen_length
        else:
            snake_case : Tuple = self.model.config.max_length
        snake_case : int = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def lowerCamelCase ( self , UpperCamelCase__ ) -> Dict[str, List[str]]:
        """Dump one batch, both decoded and tokenized, to output_dir for debugging."""
        snake_case : List[Any] = {
            k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(UpperCamelCase__ , Path(self.output_dir ) / "text_batch.json" )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
        snake_case : Optional[int] = True
        return readable_batch

    def lowerCamelCase ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
        """Forward pass: delegate directly to the wrapped model."""
        return self.model(UpperCamelCase__ , **UpperCamelCase__ )

    def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
        """Decode generated token ids to whitespace-stripped strings."""
        snake_case : Any = self.tokenizer.batch_decode(
            UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
        return lmap(str.strip , UpperCamelCase__ )

    def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
        """Compute the (possibly label-smoothed) LM loss for one batch; returns a 1-tuple."""
        snake_case : int = self.tokenizer.pad_token_id
        snake_case : int = batch["input_ids"], batch["attention_mask"]
        snake_case : Dict = batch["labels"]
        # T5 shifts labels internally; BART-style models use shift_tokens_right.
        if isinstance(self.model , UpperCamelCase__ ):
            snake_case : Optional[int] = self.model._shift_right(UpperCamelCase__ )
        else:
            snake_case : List[str] = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            snake_case : List[Any] = decoder_input_ids
            self.save_readable_batch(UpperCamelCase__ )
        snake_case : Dict = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
        snake_case : Dict = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            snake_case : Tuple = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
            assert lm_logits.shape[-1] == self.vocab_size
            snake_case : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            snake_case : Optional[int] = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
            snake_case : Optional[Any] = label_smoothed_nll_loss(
                UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
        return (loss,)

    @property
    def lowerCamelCase ( self ) -> int:
        """The tokenizer's pad token id."""
        return self.tokenizer.pad_token_id

    def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
        """Lightning training step: loss plus token/padding statistics for logging."""
        snake_case : Optional[Any] = self._step(UpperCamelCase__ )
        snake_case : List[str] = dict(zip(self.loss_names , UpperCamelCase__ ) )
        # tokens per batch
        snake_case : Tuple = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
        snake_case : Tuple = batch["input_ids"].shape[0]
        snake_case : Dict = batch["input_ids"].eq(self.pad ).sum()
        snake_case : Optional[int] = batch["input_ids"].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
        """Lightning validation step: run a generative evaluation step."""
        return self._generative_step(UpperCamelCase__ )

    def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__="val" ) -> Dict:
        """Aggregate per-step outputs into epoch-level losses/metrics and record them."""
        self.step_count += 1
        snake_case : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        snake_case : Optional[int] = losses["loss"]
        snake_case : List[str] = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        snake_case : List[str] = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        snake_case : torch.FloatTensor = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(UpperCamelCase__ )
        snake_case : Tuple = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
        snake_case : Optional[Any] = self.step_count
        self.metrics[prefix].append(UpperCamelCase__ )  # callback writes this to self.metrics_save_path
        snake_case : Optional[Any] = flatten_list([x["preds"] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F'{prefix}_loss': loss,
            F'{prefix}_{self.val_metric}': metric_tensor,
        }

    def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
        """Summarization metric: ROUGE between predictions and targets."""
        return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )

    def lowerCamelCase ( self , UpperCamelCase__ ) -> dict:
        """Generate predictions for a batch and compute loss + generation metrics."""
        snake_case : str = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        snake_case : Optional[int] = self.model.generate(
            batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        snake_case : Tuple = (time.time() - ta) / batch["input_ids"].shape[0]
        snake_case : List[str] = self.ids_to_clean_text(UpperCamelCase__ )
        snake_case : List[str] = self.ids_to_clean_text(batch["labels"] )
        snake_case : Any = self._step(UpperCamelCase__ )
        snake_case : int = dict(zip(self.loss_names , UpperCamelCase__ ) )
        snake_case : Dict = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
        snake_case : int = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
        base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
        return base_metrics

    def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
        """Lightning test step: same generative evaluation as validation."""
        return self._generative_step(UpperCamelCase__ )

    def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
        """Lightning test epoch end: reuse validation aggregation with a 'test' prefix."""
        return self.validation_epoch_end(UpperCamelCase__ , prefix="test" )

    def lowerCamelCase ( self , UpperCamelCase__ ) -> SeqaSeqDataset:
        """Build the dataset for one split using the per-split size and target length."""
        snake_case : Optional[Any] = self.n_obs[type_path]
        snake_case : Union[str, Any] = self.target_lens[type_path]
        snake_case : List[Any] = self.dataset_class(
            self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
        return dataset

    def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
        """Build a DataLoader; training may use a sortish or dynamic-batch sampler."""
        snake_case : Dict = self.get_dataset(UpperCamelCase__ )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            snake_case : Tuple = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            snake_case : Tuple = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )

    def lowerCamelCase ( self ) -> DataLoader:
        """Training DataLoader (shuffled)."""
        snake_case : Any = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
        return dataloader

    def lowerCamelCase ( self ) -> DataLoader:
        """Validation DataLoader."""
        return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )

    def lowerCamelCase ( self ) -> DataLoader:
        """Test DataLoader."""
        return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )

    @staticmethod
    def lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
        """Register seq2seq-specific CLI arguments on top of the generic ones."""
        BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
        add_generic_args(UpperCamelCase__ , UpperCamelCase__ )
        parser.add_argument(
            "--max_source_length" , default=1024 , type=UpperCamelCase__ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--max_target_length" , default=56 , type=UpperCamelCase__ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--val_max_target_length" , default=142 , type=UpperCamelCase__ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--test_max_target_length" , default=142 , type=UpperCamelCase__ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument("--freeze_encoder" , action="store_true" )
        parser.add_argument("--freeze_embeds" , action="store_true" )
        parser.add_argument("--sortish_sampler" , action="store_true" , default=UpperCamelCase__ )
        parser.add_argument("--overwrite_output_dir" , action="store_true" , default=UpperCamelCase__ )
        parser.add_argument("--max_tokens_per_batch" , type=UpperCamelCase__ , default=UpperCamelCase__ )
        parser.add_argument("--logger_name" , type=UpperCamelCase__ , choices=["default", "wandb", "wandb_shared"] , default="default" )
        parser.add_argument("--n_train" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="# examples. -1 means use all." )
        parser.add_argument("--n_val" , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help="# examples. -1 means use all." )
        parser.add_argument("--n_test" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="# examples. -1 means use all." )
        parser.add_argument(
            "--task" , type=UpperCamelCase__ , default="summarization" , required=UpperCamelCase__ , help="# examples. -1 means use all." )
        parser.add_argument("--label_smoothing" , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ )
        parser.add_argument("--src_lang" , type=UpperCamelCase__ , default="" , required=UpperCamelCase__ )
        parser.add_argument("--tgt_lang" , type=UpperCamelCase__ , default="" , required=UpperCamelCase__ )
        parser.add_argument("--eval_beams" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ )
        parser.add_argument(
            "--val_metric" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=["bleu", "rouge2", "loss", None] )
        parser.add_argument("--eval_max_gen_length" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="never generate more than n tokens" )
        parser.add_argument("--save_top_k" , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help="How many checkpoints to save" )
        parser.add_argument(
            "--early_stopping_patience" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ) , )
        return parser
class TranslationModule(SummarizationModule):
    """Fine-tuning task for translation.

    Named ``TranslationModule`` because ``main`` below instantiates it by that
    name; the base class is ``SummarizationModule`` (also referenced in ``main``).
    Generations are scored with BLEU instead of ROUGE.
    """

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        """Store the language pair so the dataset tokenizes with the right languages."""
        super().__init__(hparams, **kwargs)
        # NOTE(review): targets of these assignments were lost in the generated
        # source; `dataset_kwargs` matches the upstream seq2seq example.
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        """Return BLEU between generated predictions and references."""
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    """Build (or reuse) a seq2seq module, train it, and optionally run test().

    Named ``main`` because the ``__main__`` guard below calls ``main(args)``.

    Args:
        args: parsed CLI namespace (output_dir, task, logger_name, ...).
        model: optional pre-built module; when None one is constructed from ``args.task``.

    Returns:
        The trained SummarizationModule (or TranslationModule).
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name

    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # BUG FIX: the parsed namespace was stored in a throwaway variable while
    # main() was called with an undefined name `args`.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
| 351 |
"""simple docstring"""
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Plain recursion with no memoization (exponential time); kept as the
    baseline approach. BUG FIX: the original signature declared every
    parameter with the same name, a SyntaxError; the body already refers to
    ``rows``/``cols``/``mat``.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: walked off the matrix
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # a square ending here extends the smallest neighbouring square
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # one-element list so the nested function can mutate the running maximum
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Same recursion as the naive approach, memoized in ``dp_array``.
    BUG FIX: the original declared duplicate parameter names (SyntaxError).
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:  # already computed
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Iterative bottom-up DP over a (rows+1) x (cols+1) table; this is the
    function the ``__main__`` guard below calls. BUG FIX: the original
    declared duplicate parameter names (SyntaxError).
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_with_a_variable(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Space-optimized bottom-up DP keeping only two rows. BUG FIXES:
    duplicate parameter names (SyntaxError), and ``next_row = current_row``
    aliased the two rows so the diagonal neighbour was read from the row
    being written (e.g. returned 3 instead of 2 for
    [[1,1,1],[1,1,1],[1,1,0]]); a copy is taken instead.
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # snapshot the finished row; a plain assignment would alias the lists
        next_row = current_row.copy()

    return largest_square_area
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: side length of the largest all-ones square in a 2x2 matrix of ones
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 112 | 0 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    """Exponential-time rod cutting: best obtainable revenue for a rod of length ``n``.

    Named per its own recursive call site; the original obfuscated def had
    duplicate parameter names (SyntaxError) and called an undefined name.

    Args:
        n: rod length.
        prices: prices[i-1] is the price of a piece of length i.

    Returns:
        The maximum revenue (0 for a rod of length 0).
    """
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        # cut a first piece of length i, solve the remainder recursively
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue
def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) rod cutting: best revenue for a rod of length ``n``.

    Allocates the memo table (``-inf`` = not computed) and delegates to the
    recursive helper.
    """
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    """Recursive helper for ``top_down_cut_rod`` with memo table ``max_rev``.

    ``max_rev[n] >= 0`` means the subproblem has already been solved
    (revenues are never negative, so ``-inf`` marks "unknown").
    """
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    """Bottom-up DP rod cutting: best revenue for a rod of length ``n``."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]
def _enforce_args(n: int, prices: list) -> None:
    """Validate rod-cutting inputs.

    Raises:
        ValueError: if ``n`` is negative or exceeds ``len(prices)``.
    """
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main():
    """Self-check: the three implementations agree on a known instance."""
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 45 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings.

    BUG FIX: the parameter had been renamed while the body still referenced
    ``n_term`` (NameError); the guard below also calls ``harmonic_series``.
    An empty string yields an empty list.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # first term is "1", subsequent terms are "1/k"
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    # BUG FIX: the input was stored in a throwaway variable while the call
    # below used the undefined name `nth_term`.
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 45 | 1 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    BUG FIX: the parameter had been renamed while the body still referenced
    ``power`` (NameError); the guard below calls ``solution``.
    """
    n = 2**power
    r = 0
    while n:
        # peel off the last digit, then drop it
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
    # Read the exponent from stdin and print the digit sum of 2**power.
    print(solution(int(str(input()).strip())))
| 145 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the largest sum of any ``k`` consecutive elements (sliding window).

    BUG FIXES: the original declared duplicate parameter names (SyntaxError)
    and never initialized the running maximum before taking ``max``.

    Raises:
        ValueError: if ``k`` is negative or longer than ``array``.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # the first window seeds both the running sum and the maximum
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one position right
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    # BUG FIX: both values were stored in the same throwaway variable while
    # the f-string below referenced the undefined names `array` and `k`.
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 145 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__magic_name__: Optional[int] = logging.get_logger(__name__)
class snake_case__(FlavaImageProcessor):
    """Deprecated alias of FlavaImageProcessor kept for backward compatibility.

    BUG FIXES: the base class referenced the undefined name ``__A`` (the
    ``FlavaImageProcessor`` import above is clearly the intended base), the
    ``*``/``**`` parameters shared one name (SyntaxError), and the warning
    category argument had been clobbered (``FutureWarning`` per the
    deprecation message).
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 342 |
from typing import Any
class Node:
    """A single linked-list node holding one value.

    Named ``Node`` because ``LinkedList.insert_nth`` and the test functions
    below construct it by that name.
    """

    def __init__(self, data: Any) -> None:
        self.data = data  # payload (any type)
        self.next = None  # next Node in the chain, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and reversal.

    Named ``LinkedList`` because the test functions and ``main`` below
    construct it by that name. NOTE(review): most assignment targets were
    destroyed in the generated source; bodies were reconstructed from the
    surviving right-hand sides and the assertions in the tests below.
    """

    def __init__(self) -> None:
        self.head = None  # first Node, or None when the list is empty

    def __iter__(self) -> Any:
        """Yield each node's data from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes, counted by iterating."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so that it becomes element ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data of element ``index``."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """End-to-end checks of insertion, deletion, indexing and reversal.

    BUG FIX: the original referenced an undefined ``_a`` throughout and had
    lost the ``linked_list[i] = -i`` assignment target.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the linked list with heterogeneous data types.

    BUG FIX: the original referenced an undefined ``_a`` throughout; this
    reconstruction threads the actual list instance through every call.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
        if False
        else str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    """Interactive stdin-driven demo of the linked list.

    Named ``main`` because the ``__main__`` guard below calls ``main()``.
    BUG FIX: the original referenced an undefined ``_a`` throughout.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
    # BUG FIX: stray "| 76 | 0 |" text was fused onto this line (SyntaxError).
    main()
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` uses every letter a-z at least once.

    Named ``is_pangram`` because ``benchmark``'s timeit setup string imports
    it by that name.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a 26-slot flag table (assumes ASCII letters only).

    NOTE(review): the original flag indices were destroyed by the source
    generator; ``ord`` offsets are the natural reconstruction for a 26-slot
    table indexed by letter.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """One-liner pangram check: count distinct alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations (timeit default: 1M runs each).

    BUG FIX: the setup string was stored in a throwaway variable while the
    timeit calls referenced an undefined name.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Time the three pangram implementations against each other.
    benchmark()
| 354 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between ``fnc`` and the x-axis on [x_start, x_end].

    Uses the trapezoid rule with ``steps`` equal segments; ``abs`` makes area
    below the axis count positively. BUG FIX: the original declared every
    parameter with the same name (SyntaxError).
    """
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Increment step
        xa = xb
        fxa = fxb
    return area
if __name__ == "__main__":

    def f(x):
        """Demo integrand. BUG FIX: the parameter had been renamed while the
        body still used ``x`` (NameError)."""
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 304 | 0 |
"""simple docstring"""
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if ``input_str`` uses every letter a-z at least once.

    Named ``is_pangram`` because ``benchmark``'s timeit setup string imports
    it by that name.
    """
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Pangram check via a 26-slot flag table (assumes ASCII letters only).

    NOTE(review): the original flag indices were destroyed by the source
    generator; ``ord`` offsets are the natural reconstruction.
    """
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """One-liner pangram check: count distinct alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations (timeit default: 1M runs each).

    BUG FIX: the setup string was stored in a throwaway variable while the
    timeit calls referenced an undefined name.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Time the three pangram implementations against each other.
    benchmark()
| 45 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Fixture tuples used by the __main__ demo below, which references them by
# these names (the originals were assigned to throwaway identifiers).
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """One node of a singly linked list: a value plus the next link.

    Named ``Node`` because ``SortedLinkedList.__init__`` constructs it by
    that name. BUG FIX: both fields had been given the same identifier, so
    the dataclass ended up with a single field.
    """

    data: int
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list that stores its ints in ascending order.

    Named ``SortedLinkedList`` because ``merge_lists`` and the ``__main__``
    guard reference it by that name.
    """

    def __init__(self, ints: Iterable[int]) -> None:
        """Build the list by pushing values largest-first onto the head."""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    BUG FIX: the original declared both parameters with the same name
    (SyntaxError); the ``__main__`` guard calls this as ``merge_lists``.
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUG FIX: the alias was stored in a throwaway variable while the print
    # below referenced the undefined name `SSL`.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 21 | 0 |
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
# Maps a model-type key to (config class, model class, tokenizer class).
# Named MODEL_CLASSES because main() below indexes it by that name; it had
# been assigned to the undefined-elsewhere alias `__A`.
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Fail fast on incoherent CLI combinations.

    Checks MLM-vs-CLM exclusivity, valid student/teacher pairs, and that all
    referenced files exist. Named ``sanity_checks`` because ``main`` calls it
    by that name; the parameter is ``args`` because the body reads ``args.*``.
    """
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    """Disable gradient updates for the student's positional embeddings.

    Named ``freeze_pos_embeddings`` because ``main`` calls it by that name.
    NOTE(review): the assignment targets were destroyed by the source
    generator; the attribute paths follow the standard RoBERTa/GPT-2 module
    layouts — confirm against the model classes.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    """Disable gradient updates for the student's token-type embeddings.

    Only RoBERTa students have this table (enforced by ``sanity_checks``).
    NOTE(review): attribute path reconstructed from the standard RoBERTa
    module layout — confirm against the model class.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse CLI arguments, load teacher/student models, and run distillation.

    BUG FIX: every assignment target and argparse ``type=`` argument had been
    replaced with undefined names; they are restored here from the surviving
    right-hand sides and call sites (e.g. ``parser.add_argument``,
    ``args.*``, ``MODEL_CLASSES``).
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
    # Entry point: parse CLI args and launch teacher->student distillation.
    main()
| 341 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def _lowerCamelCase(__UpperCamelCase ) -> List[str]:
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
__A = parser.parse_args()
__A = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 341 | 1 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def get_imdb_top_aaa_movies(url: str = "") -> dict[str, float]:
    """Scrape the IMDb Top-250 chart and return ``{title: rating}``.

    An empty ``url`` falls back to the canonical chart URL.
    """
    # fix: the writer below called this function as `get_imdb_top_aaa_movies`,
    # but both functions were originally named `lowercase_`, so the second
    # definition shadowed the first and the call raised NameError.
    chart_url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(chart_url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def lowercase_(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    """Fetch the Top-250 chart and write it to ``filename`` as CSV."""
    movies = get_imdb_top_aaa_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


# fix: the __main__ guard referenced `write_movies`, which did not exist.
write_movies = lowercase_

if __name__ == "__main__":
    write_movies()
def lowercase_(x: str, y: str) -> "tuple[int, str]":
    """Return the length of the longest common subsequence of ``x`` and ``y``
    together with one such subsequence, computed by dynamic programming.

    Example: lowercase_("AGGTAB", "GXTXAYB") == (4, "GTAB")
    """
    # fix: the original signature declared BOTH parameters as `_lowerCamelCase`
    # (a SyntaxError), while the body already referenced `x` and `y`.
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # dp[i][j] = LCS length of x[:i] and y[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)
    # Walk the table backwards to reconstruct one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if dp[i][j] == dp[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif dp[i][j] == dp[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return dp[m][n], seq
if __name__ == "__main__":
    # fix: the original bound every value to `__A` and then used the undefined
    # names `longest_common_subsequence`, `a`, `b`, `ln` and `subseq`.
    a = '''AGGTAB'''
    b = '''GXTXAYB'''
    expected_ln = 4
    expected_subseq = '''GTAB'''
    ln, subseq = lowercase_(a, b)
    print('''len =''', ln, ''', sub-sequence =''', subseq)
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the Electra family: each submodule is imported
# only when one of its listed attributes is first accessed.
_import_structure = {
    'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
    'tokenization_electra': ['ElectraTokenizer'],
}
# Preserve the original (obfuscated) module-level alias.
UpperCAmelCase__ = _import_structure

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fix: the original rebound `UpperCAmelCase__` here (and below), clobbering
    # the dict; optional entries must be *added* to the import structure.
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_electra'] = [
        'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ElectraForCausalLM',
        'ElectraForMaskedLM',
        'ElectraForMultipleChoice',
        'ElectraForPreTraining',
        'ElectraForQuestionAnswering',
        'ElectraForSequenceClassification',
        'ElectraForTokenClassification',
        'ElectraModel',
        'ElectraPreTrainedModel',
        'load_tf_weights_in_electra',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_electra'] = [
        'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFElectraForMaskedLM',
        'TFElectraForMultipleChoice',
        'TFElectraForPreTraining',
        'TFElectraForQuestionAnswering',
        'TFElectraForSequenceClassification',
        'TFElectraForTokenClassification',
        'TFElectraModel',
        'TFElectraPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_electra'] = [
        'FlaxElectraForCausalLM',
        'FlaxElectraForMaskedLM',
        'FlaxElectraForMultipleChoice',
        'FlaxElectraForPreTraining',
        'FlaxElectraForQuestionAnswering',
        'FlaxElectraForSequenceClassification',
        'FlaxElectraForTokenClassification',
        'FlaxElectraModel',
        'FlaxElectraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # fix: the original referenced an undefined `_import_structure` and never
    # installed the lazy module; register it under this module's name.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = UpperCAmelCase__
| 362 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _UpperCAmelCase(base_url: str, params: dict) -> str:
    """Return the "Cited by" text of the top Google Scholar result.

    Args:
        base_url: Scholar lookup endpoint.
        params: query-string parameters identifying the paper.
    """
    # fix: the original declared both parameters as `__lowerCamelCase`,
    # which is a SyntaxError (duplicate argument name).
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    # The third anchor of the footer row is the "Cited by N" link.
    return anchors[2].get_text()
if __name__ == "__main__":
    UpperCAmelCase__ = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2018,
        'hl': 'en',
    }
    # fix: the original called the undefined names `get_citation` and `params`;
    # the function above is `_UpperCAmelCase` and the dict is `UpperCAmelCase__`.
    print(_UpperCAmelCase('https://scholar.google.com/scholar_lookup', params=UpperCAmelCase__))
| 40 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

# fix: the test classes below reference `dtypes`, `FPaa` and `BFaa`, but the
# obfuscated reassignments of `snake_case__` shadowed each other and left
# those names undefined. Canonical bindings are added; the original
# `snake_case__` assignments (including its final list value) are preserved.
snake_case__ : Optional[int] = 'bert-base-cased'
FPaa = 'fp16'
BFaa = 'bf16'
snake_case__ : Union[str, Any] = FPaa
snake_case__ : Dict = BFaa
dtypes = [FPaa, BFaa]
snake_case__ : Tuple = dtypes
@require_fsdp
@require_cuda
class A_ ( _lowerCamelCase ):
    """Unit tests for FullyShardedDataParallelPlugin configuration via env vars.

    NOTE(review): this class is obfuscation residue — the base class name
    `_lowerCamelCase` is never defined in this file (presumably
    AccelerateTestCase), every method shares the name `_lowerCAmelCase` (so
    only the last definition survives), and the `_UpperCamelCase` / `__A` /
    `fsdp_plugin` references point at names the obfuscator destroyed. Code is
    left byte-identical; only comments/docstrings were added.
    """

    # setUp: single-process FSDP environment dict (presumably meant to be
    # stored as self.dist_env, which every other method reads — TODO confirm).
    def _lowerCAmelCase (self :Tuple )-> List[Any]:
        super().setUp()
        __A = dict(
            ACCELERATE_USE_FSDP='''true''' , MASTER_ADDR='''localhost''' , MASTER_PORT='''10999''' , RANK='''0''' , LOCAL_RANK='''0''' , WORLD_SIZE='''1''' , )

    # Sharding strategy: each env value should map to the matching
    # torch ShardingStrategy enum member (1-indexed).
    def _lowerCAmelCase (self :int )-> List[Any]:
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(_UpperCamelCase ):
            __A = self.dist_env.copy()
            __A = f"""{i + 1}"""
            __A = strategy
            with mockenv_context(**_UpperCamelCase ):
                __A = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )

    # Backward prefetch: "NO_PREFETCH" disables it, otherwise the enum
    # member with the matching 1-based index is expected.
    def _lowerCAmelCase (self :Optional[Any] )-> Any:
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(_UpperCamelCase ):
            __A = self.dist_env.copy()
            __A = prefetch_policy
            with mockenv_context(**_UpperCamelCase ):
                __A = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )

    # State-dict type: FULL_STATE_DICT must additionally enable CPU offload
    # and rank-0-only saving. (NOTE(review): `ranka_only` is garbled
    # `rank0_only` — obfuscation residue.)
    def _lowerCAmelCase (self :Tuple )-> Optional[Any]:
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(_UpperCamelCase ):
            __A = self.dist_env.copy()
            __A = state_dict_type
            with mockenv_context(**_UpperCamelCase ):
                __A = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )

    # Auto-wrap policy: exercises each policy, then two failure/edge cases
    # (unknown transformer layer class; size threshold of 0).
    def _lowerCAmelCase (self :List[str] )-> Optional[Any]:
        __A = AutoModel.from_pretrained(_UpperCamelCase )
        for policy in FSDP_AUTO_WRAP_POLICY:
            __A = self.dist_env.copy()
            __A = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                __A = '''BertLayer'''
            elif policy == "SIZE_BASED_WRAP":
                __A = '''2000'''
            with mockenv_context(**_UpperCamelCase ):
                __A = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(_UpperCamelCase )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        __A = self.dist_env.copy()
        __A = '''TRANSFORMER_BASED_WRAP'''
        __A = '''T5Layer'''
        with mockenv_context(**_UpperCamelCase ):
            __A = FullyShardedDataParallelPlugin()
            with self.assertRaises(_UpperCamelCase ) as cm:
                fsdp_plugin.set_auto_wrap_policy(_UpperCamelCase )
            self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
        __A = self.dist_env.copy()
        __A = '''SIZE_BASED_WRAP'''
        __A = '''0'''
        with mockenv_context(**_UpperCamelCase ):
            __A = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(_UpperCamelCase )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )

    # Mixed precision: fp16 should install a ShardedGradScaler, bf16 none.
    def _lowerCAmelCase (self :str )-> Union[str, Any]:
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            __A = self.dist_env.copy()
            __A = mp_dtype
            with mockenv_context(**_UpperCamelCase ):
                __A = Accelerator()
                if mp_dtype == "fp16":
                    __A = torch.floataa
                elif mp_dtype == "bf16":
                    __A = torch.bfloataa
                __A = MixedPrecision(param_dtype=_UpperCamelCase , reduce_dtype=_UpperCamelCase , buffer_dtype=_UpperCamelCase )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , _UpperCamelCase )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , _UpperCamelCase ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(_UpperCamelCase )

    # CPU offload: the boolean env flag should propagate into CPUOffload.
    def _lowerCAmelCase (self :List[str] )-> Dict:
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            __A = self.dist_env.copy()
            __A = str(_UpperCamelCase ).lower()
            with mockenv_context(**_UpperCamelCase ):
                __A = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=_UpperCamelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class A_ ( _lowerCamelCase ):
    """Slow multi-GPU FSDP integration tests: each method launches an external
    training script via `accelerate launch` and checks it completes.

    NOTE(review): same obfuscation residue as the class above — undefined base
    class `_lowerCamelCase`, all methods named `_lowerCAmelCase` (only the last
    survives), and `__A` / `_UpperCamelCase` stand in for destroyed variable
    names (e.g. `cmd_config` is read but never assigned under that name).
    Code is left byte-identical; only comments/docstrings were added.
    """

    # setUp: accuracy threshold, launch configurations, memory bounds, and
    # the on-disk location of the external test scripts.
    def _lowerCAmelCase (self :str )-> int:
        super().setUp()
        __A = 0.8_2
        __A = [
            '''fsdp_shard_grad_op_transformer_based_wrap''',
            '''fsdp_full_shard_transformer_based_wrap''',
        ]
        __A = {
            '''multi_gpu_fp16''': 3200,
            '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000,
            '''fsdp_full_shard_transformer_based_wrap_fp16''': 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        __A = 160
        __A = 160
        __A = inspect.getfile(accelerate.test_utils )
        __A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )

    # Performance: run test_performance.py for each sharding/precision config
    # and assert accuracy stays above the configured lower bound.
    def _lowerCAmelCase (self :Union[str, Any] )-> Dict:
        __A = os.path.join(self.test_scripts_folder , '''test_performance.py''' )
        __A = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
        for config in self.performance_configs:
            __A = cmd.copy()
            for i, strategy in enumerate(_UpperCamelCase ):
                if strategy.lower() in config:
                    cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
                    break
            if "fp32" in config:
                cmd_config.append('''--mixed_precision=no''' )
            else:
                cmd_config.append('''--mixed_precision=fp16''' )
            if "cpu_offload" in config:
                cmd_config.append('''--fsdp_offload_params=True''' )
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
                    break
            # NOTE(review): `policy` leaks out of the loop above; these checks
            # act on the policy selected by the `break`.
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('''--fsdp_min_num_params=2000''' )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--performance_lower_bound={self.performance_lower_bound}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )

    # Checkpointing: train one partial epoch per state-dict type under
    # FULL_SHARD, then resume from the saved epoch_0 checkpoint.
    def _lowerCAmelCase (self :str )-> List[str]:
        __A = os.path.join(self.test_scripts_folder , '''test_checkpointing.py''' )
        __A = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
            '''--use_fsdp''',
            '''--mixed_precision=fp16''',
            '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
        ]
        for i, strategy in enumerate(_UpperCamelCase ):
            __A = cmd.copy()
            cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
            if strategy != "FULL_SHARD":
                continue
            __A = len(_UpperCamelCase )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                __A = cmd_config[:state_dict_config_index]
                cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"""--output_dir={self.tmpdir}""",
                        '''--partial_train_epoch=1''',
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
                __A = cmd_config[:-1]
                __A = os.path.join(self.tmpdir , '''epoch_0''' )
                cmd_config.extend(
                    [
                        f"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )

    # Peak memory: run test_peak_memory_usage.py for each spec and assert the
    # measured peak stays under the per-spec upper bound from setUp.
    def _lowerCAmelCase (self :Optional[Any] )-> List[Any]:
        __A = os.path.join(self.test_scripts_folder , '''test_peak_memory_usage.py''' )
        __A = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            __A = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(['''--mixed_precision=fp16'''] )
            else:
                cmd_config.extend(['''--mixed_precision=no'''] )
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(['''--use_fsdp'''] )
                for i, strategy in enumerate(_UpperCamelCase ):
                    if strategy.lower() in spec:
                        cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
                        break
                if "cpu_offload" in spec:
                    cmd_config.append('''--fsdp_offload_params=True''' )
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append('''--fsdp_min_num_params=2000''' )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"""--output_dir={self.tmpdir}""",
                    f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    f"""--n_train={self.n_train}""",
                    f"""--n_val={self.n_val}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
| 117 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
# fix: the weight-loading functions below reference `logger`, `MAPPING` and
# `TOP_LEVEL_KEYS`, but the obfuscated code bound everything to `snake_case__`
# and left those names undefined. Canonical bindings restored; the original
# `snake_case__` assignments are kept for compatibility.
logger = logging.get_logger(__name__)
snake_case__ : Dict = logger

# Maps fairseq parameter-name fragments to their transformers counterparts.
# A '*' is later replaced with the encoder layer index.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'label_embs_concat': 'label_embeddings_concat',
    'mask_emb': 'masked_spec_embed',
    'spk_proj': 'speaker_proj',
}
snake_case__ : Optional[Any] = MAPPING

# Keys that live at the top level of the HF model (no 'unispeech_sat.' prefix).
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'label_embeddings_concat',
    'speaker_proj',
    'layer_norm_for_extract',
]
snake_case__ : Optional[int] = TOP_LEVEL_KEYS
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``hf_pointer`` along the dotted ``key`` path and copy ``value``
    into the attribute selected by ``weight_type`` (or into the object itself
    when ``weight_type`` is None), after a shape sanity check.

    Raises:
        ValueError: if the destination shape does not match ``value.shape``.
    """
    # fix: the original `def _a(...)` declared five parameters that all shared
    # one name (a SyntaxError) and never rebound the traversal pointer;
    # canonical names restored to match the call in recursively_load_weights.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor from the fairseq model's state dict into ``hf_model``,
    using MAPPING to translate names; collects and logs unmatched weights.
    """
    # fix: the original `def _a(...)` used one name for both parameters
    # (a SyntaxError) and called undefined helpers; canonical names restored.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index for the wildcard.
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF feature
    extractor; anything that does not match goes into ``unused_weights``.

    type_id 0 selects the conv weight/bias; type_id 2 selects the layer norm
    (only for layer-norm extractors, or layer 0 of group-norm extractors).
    """
    # fix: the original `def _a(...)` again declared all five parameters with
    # the same name (a SyntaxError); canonical names restored.
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq UniSpeechSat checkpoint to the transformers format
    and save it to ``pytorch_dump_folder_path``.
    """
    # fix: the original `def _a(...)` declared all five parameters with the
    # same name (a SyntaxError); canonical names restored so the call in the
    # __main__ guard below resolves.
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    # NOTE(review): mirrors the visible unconditional assignment — the dict
    # path is deliberately blanked before deriving the fairseq data dir.
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )


if __name__ == "__main__":
    # fix: `parser` and `args` were never bound (assigned to `snake_case__`).
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 117 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
# fix: the tokenizer class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_INIT_CONFIGURATION and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but the obfuscated code bound
# everything to `__A` (shadowing itself). Canonical bindings restored; the
# original `__A` assignments are preserved.
__A : Optional[int] = logging.get_logger(__name__)
logger = __A

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
__A : Any = VOCAB_FILES_NAMES

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 5_12,
    'YituTech/conv-bert-medium-small': 5_12,
    'YituTech/conv-bert-small': 5_12,
}
__A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
__A : List[str] = PRETRAINED_INIT_CONFIGURATION
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    r"""
    "Fast" ConvBERT tokenizer backed by HuggingFace's *tokenizers* library
    (WordPiece, BERT-style pre/post-processing).
    """

    # fix: the original class was unusable — its base class name
    # (`lowerCAmelCase__`) was undefined, `__init__` declared every parameter
    # with the same name (a SyntaxError), the class attributes all shadowed
    # one another under `_UpperCamelCase`, and all three methods shared the
    # name `_snake_case`. Canonical names restored.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its stored state disagrees with
        # the requested casing / accent-stripping / CJK handling options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""", do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Wrap one sequence (or a pair) of token ids with [CLS]/[SEP]."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a_pair + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Write the WordPiece vocabulary file(s) to ``save_directory``."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
| 49 |
import math
def __UpperCamelCase ( _A : int = 100 ) ->int:
"""simple docstring"""
lowerCamelCase_ =sum(i * i for i in range(1 , n + 1 ) )
lowerCamelCase_ =int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 49 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A (OnnxPipelineTesterMixin, unittest.TestCase):
    """Shell class: the common ONNX pipeline tests come from the mixin."""

    # fix: the original base class name `snake_case__` is undefined in this
    # file; the mixin imported above is the intended base.
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __A (unittest.TestCase):
    """Nightly GPU integration tests for the ONNX Stable Diffusion inpainting
    pipeline (default PNDM scheduler, then LMSDiscreteScheduler).

    NOTE(review): obfuscation residue — `UpperCAmelCase_` is referenced
    throughout but never defined (upstream these slots were `None` and the
    numpy RandomState generator), `options`/`snake_case_` in the property
    below don't line up, and both test methods share the name `lowerCAmelCase`
    so only the last survives. Code left byte-identical; comments only.
    """

    @property
    def lowerCAmelCase ( self : List[Any] ) ->Tuple:
        """ONNX Runtime provider spec: CUDA with a capped memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def lowerCAmelCase ( self : Optional[Any] ) ->Optional[Any]:
        """ONNX Runtime session options (memory pattern disabled)."""
        snake_case_ = ort.SessionOptions()
        snake_case_ = False
        return options

    # Inpainting with the pipeline's default scheduler: checks the output
    # shape and a 3x3 slice of the last channel against reference values.
    def lowerCAmelCase ( self : Union[str, Any] ) ->List[str]:
        snake_case_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        snake_case_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        snake_case_ = """A red cat sitting on a park bench"""
        snake_case_ = np.random.RandomState(0 )
        snake_case_ = pipe(
            prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCAmelCase_ , output_type="""np""" , )
        snake_case_ = output.images
        snake_case_ = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        snake_case_ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    # Same check but with an explicit LMSDiscreteScheduler and 20 steps.
    def lowerCAmelCase ( self : int ) ->Tuple:
        snake_case_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        snake_case_ = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        snake_case_ = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
        snake_case_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
        snake_case_ = """A red cat sitting on a park bench"""
        snake_case_ = np.random.RandomState(0 )
        snake_case_ = pipe(
            prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCAmelCase_ , output_type="""np""" , )
        snake_case_ = output.images
        snake_case_ = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        snake_case_ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 347 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a small RoFormer config plus random inputs for the Flax model tests.

    Restored name: the model-test class below instantiates ``FlaxRoFormerModelTester``,
    and the two methods previously shared one scrambled name (the second shadowed
    the first and then called a nonexistent ``prepare_config_and_inputs``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        # is_decoder=False: these tests exercise the encoder-only models.
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the layout the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class __A (FlaxModelTesterMixin , unittest.TestCase):
    """Runs the shared Flax model-test suite over every RoFormer head.

    The base class was an unresolved placeholder; ``FlaxModelTesterMixin`` is the
    imported mixin this suite is built on.
    """

    # The common mixin reads this flag to enable head-masking tests.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Every head should load from the pretrained hub checkpoint and run a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __A (unittest.TestCase):
    """Integration test checking exact logits of the pretrained Chinese RoFormer MLM."""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        vocab_size = 50_000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )

        # Reference values recorded from the PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )

        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 347 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> List[str]:
a = []
for part_id in partition_order:
a = df.where(f'''SPARK_PARTITION_ID() = {part_id}''').collect()
for row_idx, row in enumerate(__UpperCamelCase):
expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """_repartition_df_if_needed should split the DataFrame so each shard fits max_shard_size."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """_generate_iterable_examples should yield rows in the requested partition order."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """A single-partition DataFrame should iterate as one shard with sequential row ids."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """shuffle_data_sources should reorder partitions according to the RNG's shuffle."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """shard_data_sources should give each worker an interleaved subset of partitions."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    """Repartitioning must never create more partitions than there are rows."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 180 |
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k±1 trial division.

    >>> is_prime(13)
    True
    >>> is_prime(1)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: side length at which the spiral-diagonal prime ratio drops below *ratio*.

    Grows an Ulam-style square spiral two layers at a time, counting primes on
    its diagonals, and returns the first side length whose ratio of primes to
    diagonal numbers falls below ``ratio``.
    """
    j = 3  # current side length of the spiral; 2*j - 1 diagonal numbers so far
    primes = 3  # 3, 5 and 7 on the diagonals of the initial 3x3 layer
    while primes / (2 * j - 1) >= ratio:
        # Corners of the next layer (side j + 2); the corner (j+2)^2 is a square, never prime.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 180 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = math.inf , _UpperCAmelCase = -math.inf , _UpperCAmelCase = math.inf , _UpperCAmelCase = -math.inf , _UpperCAmelCase = False , _UpperCAmelCase = 100 , _UpperCAmelCase = 0.01 , _UpperCAmelCase = 1 , ):
__a = False
__a = search_prob
__a = start_temperate
__a = []
__a = 0
__a = None
while not search_end:
__a = current_state.score()
if best_state is None or current_score > best_state.score():
__a = current_state
scores.append(_UpperCAmelCase )
iterations += 1
__a = None
__a = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
__a = random.randint(0 , len(_UpperCAmelCase ) - 1 ) # picking a random neighbor
__a = neighbors.pop(_UpperCAmelCase )
__a = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__a = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__a = picked_neighbor
else:
__a = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
__a = picked_neighbor
__a = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__a = True
else:
__a = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_UpperCAmelCase ) , _UpperCAmelCase )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        """Convex bowl used to demo both minimization and maximization."""
        return (x ** 2) + (y ** 2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
    )

    def test_f2(x, y):
        """Objective with a saddle-like shape: quadratic in x, linear in y."""
        return (3 * x ** 2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'{local_min.score()}'
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
        f'{local_min.score()}'
    )
| 49 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    """Derive a SwinvaConfig (sizes, depths, heads, labels) from a timm swinv2 model name.

    Expected name layout: ``swinv2_<size>_<windowN[toM]>_<imgsize[toM]>[_22k...]``,
    e.g. ``swinv2_tiny_window8_256`` or ``swinv2_base_window12to24_192to384_22kft1k``.
    """
    config = SwinvaConfig()
    name_split = swinva_name.split('''_''')

    model_size = name_split[1]
    # Image size: "192to384" -> 384, otherwise the plain number.
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    # Window size: "window12to24" -> 24, otherwise strip the "window" prefix.
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    # Fine-tuned "...to..." checkpoints carry the pretrained window sizes.
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-22k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    """Map a timm swinv2 parameter name to its transformers ("swinv2.*") equivalent.

    Classifier-head parameters keep no model prefix; everything else is prefixed
    with ``"swinv2."`` after the sub-module renames below.
    """
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "q_bias" in name:
        name = name.replace('''q_bias''', '''query.bias''')
    if "k_bias" in name:
        name = name.replace('''k_bias''', '''key.bias''')
    if "v_bias" in name:
        name = name.replace('''v_bias''', '''value.bias''')
    if "cpb_mlp" in name:
        name = name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''')
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''

    if "head" in name:
        name = name.replace('''head''', '''classifier''')
    else:
        name = '''swinv2.''' + name

    return name
def convert_state_dict(orig_state_dict, model):
    """Rewrite a timm swinv2 state dict in place into the transformers key layout.

    Fused ``qkv`` tensors are split into separate query/key/value entries sized by
    the model's ``all_head_size``; relative-position ``mask`` buffers are dropped;
    every other key is renamed via ``rename_key``.  (Restored: the scrambled
    version popped every key but never re-inserted the converted entries, so it
    returned an almost-empty dict.)
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Attention-mask buffers are recomputed by the model; drop them.
            continue
        elif "qkv" in key:
            # Keys look like "layers.<L>.blocks.<B>....qkv.<weight|bias>".
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # NOTE(review): checkpoint keys use the "swinv2." prefix (see rename_key),
            # while the model attribute in this module is spelled `swinva`.
            if "weight" in key:
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[
                    dim : dim * 2
                ]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    """Convert a timm swinv2 checkpoint to transformers format, verify logits, save and push.

    (Restored: the scrambled def had two identically named parameters — a
    SyntaxError — and referenced the undefined name ``__UpperCamelCase``.)
    """
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()

    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    # Sanity-check the conversion against timm on a COCO sample image.
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''', '''-''')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    timm_outs = timm_model(inputs['''pixel_values'''])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization='''nandwalritik''', commit_message='''Add model''',)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swinv2_name''',
        default='''swinv2_tiny_patch4_window8_256''',
        type=str,
        help='''Name of the Swinv2 timm model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )

    args = parser.parse_args()
    # argparse exposes "--swinv2_name" as args.swinv2_name.
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 103 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op in a TF SavedModel is supported by ONNX at the given opset.

    Args:
        saved_model_path: path to the SavedModel ``.pb`` file.
        strict: raise on incompatible ops instead of just printing them.
        opset: highest ONNX opset whose op list is considered supported.

    (Restored: the scrambled def had three identically named parameters — a
    SyntaxError; the raise also concatenated a str with a list, fixed with join.)
    """
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, '''utils''', '''tf_ops''', '''onnx.json''')) as f:
        onnx_opsets = json.load(f)['''opsets''']

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, '''rb''') as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f'''Found the following incompatible ops for the opset {opset}:''')
        print(*incompatible_ops, sep='''\n''')
    else:
        print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 11 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Keep the name defined so `slow_tokenizer_class = MBartTokenizer` below still resolves.
    MBartTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    """
    Fast MBART tokenizer (backed by the *tokenizers* library), based on BPE.

    Follows the fairseq convention: source text is ``<tokens> </s> <src_lang_code>``
    and target text is ``<tokens> </s> <tgt_lang_code>``.

    (Restored: every method was named ``__UpperCAmelCase`` — each definition
    shadowed the previous one, ``@src_lang.setter`` referenced a nonexistent
    property, and ``_build_translation_inputs`` had duplicate parameter names.)
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )

        self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else '''en_XX'''
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Currently configured source language code (e.g. ``"en_XX"``)."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one (or two) sequences with the current MBART prefix/suffix special tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return an all-zero token-type mask: mBART does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by generation utilities to prepare translation inputs with a forced BOS."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts,
        src_lang="en_XX",
        tgt_texts=None,
        tgt_lang="ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Source mode: no prefix, suffix = [eos, src_lang_code]; sync the post-processor."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Target mode: no prefix, suffix = [eos, tgt_lang_code]; sync the post-processor."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str, pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece model next to the fast tokenizer files."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''')

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 11 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__A =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Remote file maps, max input sizes and init kwargs for the pretrained
# checkpoints of each DPR component (context encoder, question encoder, reader).
# Fix: the obfuscated source rebound the single placeholder `__A` eleven times,
# so none of the names the tokenizer classes below reference were ever defined.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Fast tokenizer for the DPR context encoder: behaviorally identical to
    :class:`BertTokenizerFast`, with DPR checkpoint metadata attached.

    NOTE(review): class and attribute names restored — the obfuscated original
    used placeholder names (``_SCREAMING_SNAKE_CASE``, ``lowerCAmelCase__``),
    which the tokenizer base machinery would not recognize, and inherited from
    the undefined name ``snake_case_``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Fast tokenizer for the DPR question encoder: behaviorally identical to
    :class:`BertTokenizerFast`, with DPR checkpoint metadata attached.

    NOTE(review): class and attribute names restored from obfuscated
    placeholders; the original base class reference ``snake_case_`` was
    undefined.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Result records for the reader. Fix: both namedtuples were bound to the
# placeholder `__A` while `decode_best_spans` below constructs
# `DPRSpanPrediction(...)` by name — which was never defined.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
# Docstring attached to the reader tokenizer via add_start/end_docstrings.
# Fix: renamed from the placeholder `__A` (string content unchanged).
CUSTOM_DPR_READER_DOCSTRING = R'''
 Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
 It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
 using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
 with the format:
 [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
 Args:
 questions (`str` or `List[str]`):
 The questions to be encoded. You can specify one question for many passages. In this case, the question
 will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
 `titles` or `texts`.
 titles (`str` or `List[str]`):
 The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
 texts (`str` or `List[str]`):
 The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
 padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
 Activates and controls padding. Accepts the following values:
 - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
 if provided).
 - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
 acceptable input length for the model if that argument is not provided.
 - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
 lengths).
 truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
 Activates and controls truncation. Accepts the following values:
 - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
 the maximum acceptable input length for the model if that argument is not provided. This will truncate
 token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
 of pairs) is provided.
 - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
 acceptable input length for the model if that argument is not provided. This will only truncate the first
 sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
 - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
 acceptable input length for the model if that argument is not provided. This will only truncate the
 second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
 - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
 greater than the model maximum admissible input size).
 max_length (`int`, *optional*):
 Controls the maximum length to use by one of the truncation/padding parameters.
 If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
 is required by one of the truncation/padding parameters. If the model has no specific maximum input
 length (like XLNet) truncation/padding to a maximum length will be deactivated.
 return_tensors (`str` or [`~utils.TensorType`], *optional*):
 If set, will return tensors instead of list of python integers. Acceptable values are:
 - `\'tf\'`: Return TensorFlow `tf.constant` objects.
 - `\'pt\'`: Return PyTorch `torch.Tensor` objects.
 - `\'np\'`: Return Numpy `np.ndarray` objects.
 return_attention_mask (`bool`, *optional*):
 Whether or not to return the attention mask. If not set, will return the attention mask according to the
 specific tokenizer\'s default, defined by the `return_outputs` attribute.
 [What are attention masks?](../glossary#attention-mask)
 Return:
 `Dict[str, List[List[int]]]`: A dictionary with the following keys:
 - `input_ids`: List of token ids to be fed to a model.
 - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
 '''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader encoding and best-span decoding to a BERT tokenizer.

    NOTE(review): restored from obfuscated placeholders. The original did not even
    parse: every parameter of `__call__` was named `lowercase` (duplicate argument
    names → SyntaxError) and the sort key at the bottom was `lambda lowercase: x[1]`
    (NameError). Variable names are reconstructed from their uses in the body.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode question(+title+text) triples into `(n_passages, seq_len)` input ids."""
        # With no passages, behave exactly like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # With only one of titles/texts, encode it as the second sequence.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be broadcast across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ) -> List[DPRSpanPrediction]:
        """Return up to `num_spans` best answer spans across passages, most relevant docs first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans) -> List[DPRSpanPrediction]:
        """Greedy selection of up to `top_spans` non-overlapping spans by start+end score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans overlapping an already-chosen interval.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Fast tokenizer for the DPR reader: BERT tokenization plus the custom
    passage-encoding / span-decoding behavior from the mixin above.

    NOTE(review): names restored — the obfuscated original inherited from the
    undefined placeholder ``snake_case_`` (twice) and used attribute names the
    tokenizer base machinery would not recognize.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 19 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
__A =logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    """Return True when SageMaker model parallelism is configured and installed.

    Checks the SageMaker env vars `SM_HP_MP_PARAMETERS` (must contain
    "partitions") and `SM_FRAMEWORK_PARAMS` (must enable "sagemaker_mpi_enabled"),
    then verifies the `smdistributed` package is importable.

    Fix: the function was defined under the placeholder name `lowerCamelCase_`
    while the module calls `is_sagemaker_model_parallel_available()` below, and
    its locals were bound to placeholders but read by their real names.
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
# Initialize SageMaker model parallelism at import time so `smp` is ready
# before any trainer code runs; `smdistributed` only exists on SageMaker
# model-parallel-enabled images.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """Deprecated `TrainingArguments` variant with SageMaker-specific device setup.

    NOTE(review): class, field and method names restored from obfuscated
    placeholders. The class name is grounded by its own deprecation message;
    locals such as `device` and `local_rank` were read but never bound in the
    obfuscated original (NameError).
    """

    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device, preferring SageMaker MP/DP setups when available."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device

    @property
    def world_size(self):
        # Under model parallelism the data-parallel size is the effective world size.
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # smp handles device placement itself.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 19 | 1 |
def catalan_numbers(upper_limit):
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Raises ValueError for a negative limit. Fix: the obfuscated original bound
    every value to the placeholder `lowercase` while the recurrence read the
    never-defined `catalan_list` (NameError); the function name is grounded by
    the `catalan_numbers(N)` call in the script block below.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
# Interactive driver: repeatedly read an upper limit and print the Catalan
# numbers until the user enters a negative number or non-numeric input.
# Fix: the input was bound to the placeholder `lowercase__` while the loop
# read `N` (NameError on the first iteration).
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(F'The Catalan numbers from 0 through {N} are:')
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

import doctest

doctest.testmod()
| 97 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Fix: these three constants were bound to the placeholder `lowercase__` while
# the following lines (and the checks below) read PATH_TO_TRANSFORMERS,
# `transformers` and CONFIG_MAPPING by name.
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Config attributes that are allowed to be "unused" in modeling files, with a
# short justification each. Fix: was bound to the placeholder `lowercase__`
# while `SPECIAL_CASES_TO_ALLOW.update(...)` and `check_attribute_being_used`
# read it by name.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
# A value of `True` whitelists the whole config class rather than a specific
# attribute list.
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any name in `attributes` is used in the modeling sources, or is allowed unused.

    Fix: the obfuscated original declared four parameters all named
    `lowerCAmelCase__` (duplicate argument names → SyntaxError) and bound its
    locals to the placeholder `lowercase` while reading `attribute_used`,
    `attributes_to_allow`, etc. Names restored from their uses in the body and
    from the caller below (`check_attribute_being_used(...)`).
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class.__init__` parameters unused in its modeling files.

    Fix: restored from obfuscated placeholders — the original bound locals to
    `lowercase` and re-used `lowerCAmelCase__` for unrelated values, leaving
    names like `signature`, `reversed_attribute_map` and `unused_attributes`
    unbound. The function name is grounded by the call in
    `check_config_attributes` below.
    """
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Check every registered config class for unused attributes; raise ValueError listing offenders.

    Fix: restored from obfuscated placeholders — the original's lambda parameter
    was named `lowerCAmelCase__` while the class filter tested it by name, and the
    accumulator dict/error string were bound to `lowercase` but read as
    `configs_with_unused_attributes` / `error`.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
# CI entry point: run the unused-attribute check when executed as a script.
if __name__ == "__main__":
    check_config_attributes()
| 97 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for GPTSAN-japanese; vocab/emoji fixtures are written in setUp.

    NOTE(review): class and attribute names restored — the obfuscated original
    used placeholder attribute names (``__UpperCamelCase``) that the shared
    ``TokenizerTesterMixin`` harness reads as ``tokenizer_class``,
    ``test_rust_tokenizer`` and ``from_pretrained_kwargs``.
    """

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    # Extra kwargs forwarded to `from_pretrained` by the mixin harness.
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
def setUp(self):
    """Write a tiny vocab file and emoji table to tmpdir so a real tokenizer can be built.

    Fix: the obfuscated original bound the token list and emoji dict to
    placeholders while the file-writing code read `vocab_tokens` (and serialized
    a never-defined value) — NameError at runtime.
    """
    super().setUp()
    # fmt: off
    vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
    # fmt: on
    emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
    self.special_tokens_map = {"unk_token": "<unk>"}
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    with open(self.emoji_file, "w") as emoji_writer:
        emoji_writer.write(json.dumps(emoji_tokens))
def get_tokenizer(self, **kwargs):
    """Build a tokenizer from the fixture directory, merging in the special-tokens map.

    Fix: the obfuscated signature took ``**UpperCamelCase_`` while the body read
    ``kwargs`` (NameError).
    """
    kwargs.update(self.special_tokens_map)
    return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
    """Return an (input, expected-decoded-output) text pair for round-trip tests.

    Fix: the obfuscated original bound both strings to placeholders and then
    returned the never-defined names `input_text`/`output_text`.
    """
    input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
    output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
    return input_text, output_text
def get_clean_sequence(self, tokenizer):
    """Return (decoded text, ids) for the expected output text.

    Fix: restored placeholder locals; the obfuscated original unpacked into two
    variables with the same placeholder name and returned undefined `text`/`ids`.
    NOTE(review): the two boolean-like kwargs were obfuscated — `False` for both
    `add_special_tokens` and `clean_up_tokenization_spaces` matches the upstream
    pattern for this helper; confirm against the project harness.
    """
    input_text, output_text = self.get_input_output_texts(tokenizer)
    ids = tokenizer.encode(output_text, add_special_tokens=False)
    text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
    return text, ids
# NOTE(review): the original method names were lost in obfuscation; these three
# identically-named stubs shadow one another, so only the last binds. They look
# like deliberate no-op overrides of inherited mixin tests — recover the real
# names from upstream before relying on them.
def lowerCamelCase__ ( self : Any ):
    pass  # TODO add if relevant
def lowerCamelCase__ ( self : Union[str, Any] ):
    pass  # TODO add if relevant
def lowerCamelCase__ ( self : Union[str, Any] ):
    pass  # TODO add if relevant
def test_full_tokenizer(self):
    """Tokenize, then convert to ids with and without special tokens.

    Fix: restored placeholder locals (`tokens`, `input_ids`, expected lists);
    the obfuscated original bound everything to `lowerCAmelCase` and compared
    undefined names.
    """
    tokenizer = self.get_tokenizer()
    # Testing tokenization
    input_text = "こんにちは、世界。 こんばんは、㔺界。"
    expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
    tokens = tokenizer.tokenize(input_text)
    self.assertListEqual(tokens, expected_tokens)
    # Testing conversion to ids without special tokens
    expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(input_ids, expected_ids)
    # Testing conversion to ids with special tokens
    input_tokens = tokens + [tokenizer.unk_token]
    expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
    input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
    self.assertListEqual(input_ids, expected_ids)
def test_token_bagging(self):
    """<|bagoftoken|> should expand to repeated tokens on decode.

    Fix: restored placeholder locals; the obfuscated original compared two
    undefined names.
    """
    tokenizer = self.get_tokenizer()
    # Testing tokenization
    input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
    expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
    tokens = tokenizer.encode(input_text)
    output_text = tokenizer.decode(tokens)
    self.assertEqual(output_text, expected_text)
@slow
def test_prefix_input(self):
    """Prefix text passed inline, via prefix_text only, or split, must decode identically.

    Fix: restored placeholder locals; the obfuscated original encoded/decoded
    into `lowerCAmelCase` repeatedly and asserted on undefined names.
    """
    tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
    # Testing tokenization
    prefix_text = "こんにちは、世界。"
    input_text = "こんばんは、㔺界。😀"
    expected_text = "こんにちは、世界。こんばんは、世界。😀"
    tokens_1 = tokenizer.encode(prefix_text + input_text)
    tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
    tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
    output_text_1 = tokenizer.decode(tokens_1)
    output_text_2 = tokenizer.decode(tokens_2)
    output_text_3 = tokenizer.decode(tokens_3)
    self.assertEqual(output_text_1, expected_text)
    self.assertEqual(output_text_2, expected_text)
    self.assertEqual(output_text_3, expected_text)
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
lowerCAmelCase : Optional[int] = '''こんにちは、世界。'''
lowerCAmelCase : Union[str, Any] = '''こんばんは、㔺界。😀'''
lowerCAmelCase : Dict = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
lowerCAmelCase : List[Any] = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
lowerCAmelCase : List[Any] = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase : Tuple = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase : int = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase : Dict = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase : Optional[int] = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase : Dict = tokenizer(UpperCamelCase_ , prefix_text=UpperCamelCase_ ).token_type_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase : List[Any] = tokenizer.encode('''あンいワ''' )
lowerCAmelCase : List[Any] = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
lowerCAmelCase : List[Any] = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[int] = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
lowerCAmelCase : List[Any] = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
lowerCAmelCase : List[str] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = tokenizer.batch_encode_plus(UpperCamelCase_ , padding=UpperCamelCase_ )
# fmt: off
lowerCAmelCase : Optional[Any] = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
lowerCAmelCase : Tuple = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase : int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token.attention_mask , UpperCamelCase_ )
self.assertListEqual(x_token_a.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.attention_mask , UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def lowerCamelCase__ ( self : int ):
# tokenizer has no padding token
pass
| 60 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
# Module-level logger (obfuscated name `a` kept as-is to avoid breaking references).
a = logging.get_logger(__name__)
class lowercase_(ImageGPTImageProcessor):
    """Deprecated alias for :class:`ImageGPTImageProcessor`.

    Kept for backward compatibility; emits a ``FutureWarning`` on construction
    and otherwise behaves exactly like the image processor it wraps.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Original code had duplicate `*args`/`**kwargs` names (a SyntaxError)
        # and omitted the warning category; FutureWarning matches the other
        # deprecated feature extractors in transformers.
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 315 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class a(Dataset):
    """Dataset of tokenized LM sequences used for distillation training.

    On construction the raw sequences are cleaned: sequences longer than the
    model's max input size are split into chunks, very short sequences are
    dropped, and sequences dominated by unknown tokens are removed.
    """

    def __init__(self, params, data):
        # params: namespace with mlm, special_tok_ids, max_model_input_size, is_master
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check: token arrays and recorded lengths must agree."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into shorter chunks."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            # Slices of at most n elements, in order.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room to re-add the boundary tokens per chunk.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')

    def remove_unknown_sequences(self):
        """Drop sequences where >=50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')

    def print_statistics(self):
        """Log corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Pad a batch of (token_ids, length) pairs and convert to tensors."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 369 |
def combination_util(arr, n, r, index, data, i):
    """Recursively print all size-r combinations of arr[0..n-1].

    data holds the combination being built; index is the next free slot in
    data, i is the next candidate element of arr.
    """
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print every size-r combination of the first n elements of arr."""
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 134 | 0 |
'''simple docstring'''
# Fifth power of each decimal digit, keyed by the digit's character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum all numbers equal to the sum of the fifth powers of their digits.

    Candidates below 1000 are excluded by definition; 6*9^5 < 10^6 bounds the search.
    """
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 41 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any=7 , UpperCamelCase__: Optional[int]=3 , UpperCamelCase__: List[str]=18 , UpperCamelCase__: Union[str, Any]=30 , UpperCamelCase__: List[str]=400 , UpperCamelCase__: Any=True , UpperCamelCase__: Union[str, Any]=None , UpperCamelCase__: List[Any]=True , UpperCamelCase__: List[Any]=False , UpperCamelCase__: Tuple=True , UpperCamelCase__: Tuple=True , UpperCamelCase__: Any=[0.5, 0.5, 0.5] , UpperCamelCase__: Optional[Any]=[0.5, 0.5, 0.5] , ):
lowerCamelCase__ : int = parent
lowerCamelCase__ : Any = batch_size
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : Union[str, Any] = image_size
lowerCamelCase__ : Optional[int] = min_resolution
lowerCamelCase__ : Optional[Any] = max_resolution
lowerCamelCase__ : Union[str, Any] = do_resize
lowerCamelCase__ : Optional[Any] = size if size is not None else {"""height""": 18, """width""": 20}
lowerCamelCase__ : Dict = do_thumbnail
lowerCamelCase__ : Optional[int] = do_align_axis
lowerCamelCase__ : Any = do_pad
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : Union[str, Any] = image_mean
lowerCamelCase__ : Union[str, Any] = image_std
def lowerCamelCase_ ( self: str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """DonutImageProcessor tests (method names restored so unittest discovers them)."""

    # DonutImageProcessor is only importable when the vision extras are installed.
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 41 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batch of differentiable standard pinhole cameras."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # All per-camera vectors must be 2-D [batch_size, 3] and batch-aligned.
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Return (width, height) as a float32 tensor."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Return (x_fov, y_fov) as a float32 tensor."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self):
        """Pixel (x, y) coordinates for every pixel, row-major, shape [H*W, 2]."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """Rays for every pixel of every camera, shape [batch, inner*H*W, 2, 3]."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays

    def get_camera_rays(self, coords):
        """Map pixel coordinates to (origin, direction) pairs per camera."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coords to [-1, 1], then scale by the half-FOV tangent.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width, height):
        """Create a new camera for the resized view (aspect ratio must not change)."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,  # required field; reuse the current batch shape
        )
def __A(size):
    """Create a batch of 20 cameras panning in a circle around the origin.

    Each camera looks toward the origin from radius 4 with a slight downward
    tilt (z has a -0.5 vertical component before normalization).
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 170 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
# Module-level logger and the TypeVar shared by interleave/concatenate signatures.
logger = logging.get_logger(__name__)

DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def __A(
    datasets,
    probabilities=None,
    seed=None,
    info=None,
    split=None,
    stopping_strategy="first_exhausted",
):
    """Interleave several datasets (all map-style or all iterable) into one.

    Validates that every element is a Dataset or IterableDataset of the same
    kind, then dispatches to the matching interleave implementation.
    """
    # Local imports avoid a circular dependency with the dataset modules.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # Remember the kind of the first dataset; all others must match it.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def __A(dsets, info=None, split=None, axis=0):
    """Concatenate several datasets (all map-style or all iterable) along ``axis``.

    Validates that every element is a Dataset or IterableDataset of the same
    kind, then dispatches to the matching concatenate implementation.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # Remember the kind of the first dataset; all others must match it.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 170 | 1 |
# Lint as: python3
import itertools
import os
import re
# Regexes used by the case-conversion helpers below.
_uppercase_uppercase_re = re.compile(R'([A-Z]+)([A-Z][a-z])')
_lowercase_uppercase_re = re.compile(R'([a-z\d])([A-Z])')
_single_underscore_re = re.compile(R'(?<!_)_(?!_)')
_multiple_underscores_re = re.compile(R'(_{2,})')
# Valid split names: word characters, optionally dot-separated.
_split_re = R'^\w+(\.\w+)*$'
# NOTE(review): name assumed from upstream `datasets.naming`; original LHS was obfuscated.
INVALID_WINDOWS_CHARACTERS_IN_PATH = R'<>:/\|?*'
def camelcase_to_snakecase(name):
    """Convert CamelCase (incl. acronym runs) to lower snake_case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert snake_case to CamelCase, dropping underscore runs."""
    parts = _single_underscore_re.split(name)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")
def filename_prefix_for_name(name):
    """Snake-cased filename prefix for a dataset name (rejects paths)."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """Filename prefix ``<snake_name>-<split>`` after validating both parts."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Glob pattern matching all shard files for a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Concrete shard filenames for a dataset split.

    With ``shard_lengths``, returns one ``-NNNNN-of-NNNNN`` name per shard;
    otherwise a single-element list with the plain prefix.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 11 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# All published Funnel Transformer checkpoint size variants.
_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
        "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
        "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
        "funnel-transformer/small-base": (
            "https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
        "funnel-transformer/medium-base": (
            "https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate": (
            "https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/intermediate-base": (
            "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
        "funnel-transformer/large-base": (
            "https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
        ),
        "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
        "funnel-transformer/xlarge-base": (
            "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class __lowerCAmelCase(UpperCamelCase__):
    """Fast Funnel Transformer tokenizer (BERT-style WordPiece, backed by the
    `tokenizers` library).

    Fixes restored here: the original had every parameter named identically
    (a SyntaxError), all class attributes bound to one shadowed name, and
    locals assigned to throwaway names while being read by their real names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Funnel uses token type id 2 for the [CLS] token (unlike BERT's 0).
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        # Re-sync the backend normalizer if the requested options differ from the
        # ones serialized in tokenizer_file.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # NOTE(review): `normalizers` is expected to come from the module's
            # `tokenizers` import (not visible in this chunk) — confirm it is imported.
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add [CLS]/[SEP] around one sequence (or a pair) of token ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Token type ids: `cls_token_type_id` for [CLS], 0 for the first segment
        (+ its [SEP]), 1 for the second segment (+ its [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the WordPiece vocabulary files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 95 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
# Both constants were bound to the same name `__A`, so the second shadowed the
# first; the test class below reads them as TOKENIZER_CHECKPOINTS / TINY_MODEL_CHECKPOINT.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():

    class ModelToSave(tf.Module):
        # Renamed from the autogenerated `_a`: the test below instantiates
        # `ModelToSave(tokenizer=...)`, which previously raised NameError.
        """Bundles a TF tokenizer with a GPT-2 LM head model so both can be
        exported together in a single SavedModel `serving` signature."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            # NOTE(review): original loaded the config from the tiny checkpoint
            # constant, not from the tokenizer argument — confirm.
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            """Tokenize a batch of strings in-graph and return the LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            # Non-zero ids are real tokens; zeros come from to_tensor() padding.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs
@require_tf
@require_keras_nlp
class _a(unittest.TestCase):
    """Checks that the in-graph TF tokenizer matches the reference Python GPT-2
    tokenizer and survives tf.function compilation, SavedModel round-trips,
    config round-trips, and max_length padding.

    Fixes restored here: all six methods were named `lowercase__` (each
    shadowing the previous, so unittest never discovered any of them and
    `setUp` never ran), and every `self.*` attribute read below was assigned
    to a throwaway local instead.
    """

    def setUp(self):
        super().setUp()

        # One reference (Python) and one in-graph (TF) tokenizer per checkpoint.
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 326 |
"""simple docstring"""
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return `number`'s multiplication table with `number_of_terms` lines.

    Each line has the form ``"{number} * {i} = {number * i}"`` for i in
    1..number_of_terms; an empty string is returned for zero terms.
    (Fixed: both parameters shared one name — a SyntaxError — and the function
    name did not match the call below.)
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 326 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# All of these module constants were bound to one shadowed name; restore the
# names the tokenizer class below actually reads.
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class lowerCAmelCase_(PreTrainedTokenizerFast):
    """Fast PEGASUS tokenizer (backed by the `tokenizers` library).

    Fixes restored here: the base class name was undefined (the module imports
    ``PreTrainedTokenizerFast``), all four methods were named ``_snake_case``
    (shadowing each other, and ``self._special_token_mask`` could never
    resolve), and several locals/attributes were assigned to throwaway names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # The slow vocab can only be re-saved when we still have the spiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over `seq` marking special tokens (1 = special)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Get a mask of special tokens for one sequence or a pair, with the
        trailing [1] accounting for the EOS token added at build time."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """PEGASUS only appends EOS; no BOS/CLS/SEP tokens are inserted."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into `save_directory`; returns its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 319 |
'''simple docstring'''
import heapq
import sys
import numpy as np
UpperCamelCase = tuple[int, int]
class PriorityQueue:
    """Heap-backed priority queue of unique items, supporting priority update
    and arbitrary removal (both O(n) via pop-and-repush).

    Renamed from the autogenerated `_a`-style name: `multi_a_star` below
    instantiates `PriorityQueue()` and calls `put`, `minkey`, `top_show`,
    `remove_element`, and `get` by these names.
    """

    def __init__(self):
        # `elements` is the heap of (priority, item); `set` mirrors the items
        # currently stored for O(1) membership checks.
        self.elements = []
        self.set = set()

    def minkey(self):
        """Smallest priority currently stored, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert `item`, or update its priority if it is already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until we find `item`, then push everything
            # back with the new priority.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove `item` from the queue if present (no-op otherwise)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Peek at the item with the smallest priority without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P, goal):
    """Euclidean distance between grid positions `P` and `goal` (the
    consistent heuristic referenced by the `heuristics` table below)."""
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)
def heuristic_2(P, goal):
    # integer division by time variable: euclidean distance scaled down by the
    # global iteration counter `t` (an inconsistent heuristic by design).
    return consistent_heuristic(P, goal) // t
def heuristic_1(P, goal):
    # manhattan distance between grid positions P and goal
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])
def key(start, i, goal, g_function):
    """Weighted A* key: g(start) + W1 * h_i(start, goal), using the i-th
    heuristic from the module-level `heuristics` table."""
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the final grid with the found path traced in '-' characters,
    print the path itself, then terminate the program.

    Fix restored: the grid cell assignments had lost their `grid[...]` targets
    (they were bound to throwaway locals), so the path was never drawn.
    """
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        # grid rows run top-to-bottom while y runs bottom-to-top, hence the flip.
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p):
    """Return True when position `p` lies inside the n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Expand state `s` taken from queue `j`: remove it from every open list,
    then relax its four grid neighbours, pushing improved ones onto the anchor
    queue and any inadmissible queues whose key passes the W2 bound."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    """Build the list of blocked grid positions for the 'common ground' map
    (several rectangular obstacle regions plus an L-shaped block)."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
# Module-level configuration for the multi-heuristic A* search. All of these
# were bound to one shadowed throwaway name; restore the names the search
# functions read (heuristics, blocks, W1, W2, n, n_heuristic, start, goal, t).
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start, goal, n_heuristic):
    """Multi-heuristic A*: one anchor queue (consistent heuristic) plus
    n_heuristic-1 inadmissible queues. Expands from whichever queue's minkey
    passes the W2 bound; prints the path via `do_something` on success, or the
    grid with 'No path found to goal' on failure."""
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): mirrors the original two-target unpack of
                    # top_show() here — confirm against the upstream algorithm.
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
# Script entry point: run the multi-heuristic A* search over the
# module-level grid configuration defined above.
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 319 | 1 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: float
) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing value/weight ratio; the last item that does
    not fit is taken fractionally. Returns ``(max_value, fractions)`` where
    ``fractions[i]`` is the fraction of item i taken (0, 1, or partial).
    (Fixed: all three parameters shared one autogenerated name — a
    SyntaxError — while the body read them as value/weight/capacity.)
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 324 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Shared filename/endpoint constants for the diffusers library. All thirteen
# were bound to the same throwaway name, each shadowing the previous one;
# restore the distinct public names.
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
# Attribute-path suffix used to locate text-encoder self-attention modules.
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 324 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCamelCase(unittest.TestCase):
    """Tests for AlignProcessor: save/load round-trips, tokenizer and image
    processor delegation, batch decoding, and model input names.

    Fixes restored here: every method shared the name `lowerCAmelCase_`
    (shadowing each other, so unittest never ran them and the `self.get_*`
    helper calls could not resolve), and all `self.*` attributes were
    assigned to throwaway locals.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create one random PIL image (moveaxis converts CHW -> HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 66 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a Parquet row-group batch size that keeps row groups small enough
    for random access when the dataset contains large binary features
    (images, audio, raw bytes). Returns None when no such feature is present.

    (Renamed from the autogenerated `lowercase`: the writer class below calls
    `get_writer_batch_size(dataset.features)`; also restores the garbled
    isinstance checks against Image/Audio/Value.)
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    # Walk every (possibly nested) feature and shrink batch_size accordingly.
    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class _a(AbstractDatasetReader):
    """Dataset reader for Parquet files.

    Fixes restored here: the base class name was undefined (the module imports
    `AbstractDatasetReader`), `self.builder` was assigned to a throwaway local,
    and the reader method was not named `read` as the reader API requires.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (streaming or map-style) and return it."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class _a :
    """Streams an Arrow-backed dataset to a parquet file or binary buffer.

    Fixes vs. the previous revision: every ``__init__`` parameter shared one
    name (a SyntaxError), all four values were dropped into the same throwaway
    local instead of instance attributes, the undefined
    ``get_writer_batch_size`` was called (that helper is defined above as
    ``lowercase``), and both methods were named ``lowercase__`` so the public
    entry point was shadowed by the batch writer it calls as ``self._write``.
    NOTE(review): this class re-uses the name ``_a`` of the reader class above
    and shadows it at module level -- identifiers look machine-mangled.
    """

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ) -> None:
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Default the batch size from the dataset's features (media-heavy
        # features get smaller row groups).
        self.batch_size = batch_size or lowercase(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def lowercase__(self) -> int:
        """Write the whole dataset; returns the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, '''wb+''') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to ``file_obj`` in slices of ``batch_size`` rows."""
        written = 0
        # Never forward the destination to pyarrow -- it already has file_obj.
        parquet_writer_kwargs.pop('''path_or_buf''', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit='''ba''',
            disable=not logging.is_progress_bar_enabled(),
            desc='''Creating parquet from Arrow format''',
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 260 | 0 |
def _UpperCamelCase ( UpperCamelCase_ : int ) -> list:
"""simple docstring"""
lowerCAmelCase__ = int(UpperCamelCase_ )
if n_element < 1:
lowerCAmelCase__ = ValueError('a should be a positive number' )
raise my_error
lowerCAmelCase__ = [1]
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (0, 0, 0)
lowerCAmelCase__ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    # Fix: the previous revision bound both the user input and the result to
    # the same throwaway name and then called the undefined `hamming(int(n))`,
    # raising NameError.  `_UpperCamelCase` above is the Hamming generator.
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = _UpperCamelCase(int(n))
    print("-----------------------------------------------------")
    print(f'The list with nth numbers is: {hamming_numbers}')
    print("-----------------------------------------------------")
| 122 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
    """Fast CPU tests for ``KandinskyVaaInpaintPipeline`` built from tiny dummy models.

    NOTE(review): identifiers in this class look machine-mangled -- every class
    attribute rebinds ``_SCREAMING_SNAKE_CASE``, every method/property is named
    ``UpperCamelCase__`` (later definitions shadow earlier ones), and locals are
    bound to ``lowerCAmelCase__`` but read back under their original names
    (``model``, ``components``, ``inputs``...).  Several signatures also repeat
    a parameter name.  Confirm against the upstream diffusers test module
    before relying on any individual accessor.
    """
    _SCREAMING_SNAKE_CASE : int = KandinskyVaaInpaintPipeline
    _SCREAMING_SNAKE_CASE : int = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    _SCREAMING_SNAKE_CASE : Optional[Any] = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    _SCREAMING_SNAKE_CASE : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _SCREAMING_SNAKE_CASE : Optional[Any] = False

    @property
    def UpperCamelCase__ ( self ):
        """Hidden size used for the dummy text/image embeddings."""
        return 32

    @property
    def UpperCamelCase__ ( self ):
        """Time-embedding input dimension of the dummy UNet."""
        return 32

    @property
    def UpperCamelCase__ ( self ):
        """Base block output channels (mirrors the time input dim)."""
        return self.time_input_dim

    @property
    def UpperCamelCase__ ( self ):
        """Time-embedding dimension (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def UpperCamelCase__ ( self ):
        """Cross-attention dimension of the dummy UNet."""
        return 1_00

    @property
    def UpperCamelCase__ ( self ):
        """Build a tiny, seed-0 conditional UNet for fast tests."""
        torch.manual_seed(0 )
        lowerCAmelCase__ = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        lowerCAmelCase__ = UNetaDConditionModel(**_UpperCamelCase )
        return model

    @property
    def UpperCamelCase__ ( self ):
        """Constructor kwargs for the tiny VQ ('movq') image codec."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCamelCase__ ( self ):
        """Build the tiny, seed-0 VQModel."""
        torch.manual_seed(0 )
        lowerCAmelCase__ = VQModel(**self.dummy_movq_kwargs )
        return model

    def UpperCamelCase__ ( self ):
        """Assemble the pipeline components dict (unet, scheduler, movq)."""
        lowerCAmelCase__ = self.dummy_unet
        lowerCAmelCase__ = self.dummy_movq
        lowerCAmelCase__ = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , steps_offset=1 , prediction_type='epsilon' , thresholding=_UpperCamelCase , )
        lowerCAmelCase__ = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase=0 ):
        """Create deterministic dummy inputs (image, mask, embeddings, generator)."""
        lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        # NOTE(review): `seed` below is not a bound name in this (mangled)
        # signature -- presumably the second parameter; confirm upstream.
        lowerCAmelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            _UpperCamelCase )
        # create init_image
        lowerCAmelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase__ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('RGB' ).resize((2_56, 2_56) )
        # create mask
        lowerCAmelCase__ = np.ones((64, 64) , dtype=np.floataa )
        lowerCAmelCase__ = 0
        if str(_UpperCamelCase ).startswith('mps' ):
            lowerCAmelCase__ = torch.manual_seed(_UpperCamelCase )
        else:
            lowerCAmelCase__ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        lowerCAmelCase__ = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs

    def UpperCamelCase__ ( self ):
        """Run the pipeline on CPU and compare a 3x3 output slice to golden values."""
        lowerCAmelCase__ = 'cpu'
        lowerCAmelCase__ = self.get_dummy_components()
        lowerCAmelCase__ = self.pipeline_class(**_UpperCamelCase )
        lowerCAmelCase__ = pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        lowerCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
        lowerCAmelCase__ = output.images
        lowerCAmelCase__ = pipe(
            **self.get_dummy_inputs(_UpperCamelCase ) , return_dict=_UpperCamelCase , )[0]
        lowerCAmelCase__ = image[0, -3:, -3:, -1]
        lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
        print(F"image.shape {image.shape}" )
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ = np.array(
            [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def UpperCamelCase__ ( self ):
        """Batched single-image inference must match unbatched output."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """Slow GPU integration test against the real Kandinsky 2.2 checkpoints.

    NOTE(review): this class re-uses (and therefore shadows) the name of the
    fast-test class above, and locals are bound to ``lowerCAmelCase__`` but
    read back under their original names -- identifiers look machine-mangled.
    """

    def UpperCamelCase__ ( self ):
        """Free GPU memory between tests."""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase__ ( self ):
        """End-to-end prior + inpaint run compared against a stored result image."""
        lowerCAmelCase__ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
        lowerCAmelCase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        lowerCAmelCase__ = np.ones((7_68, 7_68) , dtype=np.floataa )
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 'a hat'
        lowerCAmelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(_UpperCamelCase )
        lowerCAmelCase__ = KandinskyVaaInpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa )
        lowerCAmelCase__ = pipeline.to(_UpperCamelCase )
        pipeline.set_progress_bar_config(disable=_UpperCamelCase )
        lowerCAmelCase__ = torch.Generator(device='cpu' ).manual_seed(0 )
        lowerCAmelCase__ , lowerCAmelCase__ = pipe_prior(
            _UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        lowerCAmelCase__ = pipeline(
            image=_UpperCamelCase , mask_image=_UpperCamelCase , image_embeds=_UpperCamelCase , negative_image_embeds=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
        lowerCAmelCase__ = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
| 122 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
# Make the examples root importable so `lightning_base` resolves when this
# script is run from its own directory.
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa
# Module-level logger for this training script.
__UpperCAmelCase = logging.getLogger(__name__)
class lowerCamelCase (_snake_case ):
    """Summarization fine-tuning task built on PyTorch Lightning.

    NOTE(review): identifiers in this module look machine-mangled -- every
    class attribute rebinds ``_snake_case``, locals are bound to
    ``UpperCAmelCase_`` and then read back under their original names
    (``loss_tensors``, ``preds``, ``dataset``...), several signatures repeat
    the same parameter name, and the base class ``_snake_case`` is not defined
    in this module.  Treat names below as untrustworthy and confirm against
    the upstream transformers seq2seq ``finetune.py`` before running.
    """
    _snake_case : List[Any] = '''summarization'''
    _snake_case : Optional[int] = ['''loss''']
    _snake_case : str = ROUGE_KEYS
    _snake_case : Tuple = '''rouge2'''

    def __init__( self , _UpperCamelCase , **_UpperCamelCase ) -> List[str]:
        # Reject incompatible sampler / multi-gpu combinations up front.
        if hparams.sortish_sampler and hparams.gpus > 1:
            UpperCAmelCase_ : Union[str, Any] = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
        super().__init__(_UpperCamelCase , num_labels=_UpperCamelCase , mode=self.mode , **_UpperCamelCase )
        use_task_specific_params(self.model , 'summarization' )
        save_git_info(self.hparams.output_dir )
        UpperCAmelCase_ : Optional[int] = Path(self.output_dir ) / 'metrics.json'
        UpperCAmelCase_ : str = Path(self.output_dir ) / 'hparams.pkl'
        pickle_save(self.hparams , self.hparams_save_path )
        UpperCAmelCase_ : int = 0
        UpperCAmelCase_ : Optional[int] = defaultdict(_UpperCamelCase )
        UpperCAmelCase_ : Union[str, Any] = self.config.model_type
        UpperCAmelCase_ : Any = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
        UpperCAmelCase_ : dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        # Per-split example caps (-1 means "use all").
        UpperCAmelCase_ : Tuple = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        UpperCAmelCase_ : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        UpperCAmelCase_ : str = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        UpperCAmelCase_ : Any = get_git_info()['repo_sha']
        UpperCAmelCase_ : int = hparams.num_workers
        UpperCAmelCase_ : List[Any] = None # default to config
        # MBart needs an explicit decoder_start_token_id for the target language.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCamelCase ):
            UpperCAmelCase_ : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            UpperCAmelCase_ : Dict = self.decoder_start_token_id
        UpperCAmelCase_ : List[Any] = (
            SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
        )
        UpperCAmelCase_ : str = False
        UpperCAmelCase_ : List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            UpperCAmelCase_ : Optional[int] = self.hparams.eval_max_gen_length
        else:
            UpperCAmelCase_ : Optional[Any] = self.model.config.max_length
        UpperCAmelCase_ : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict[str, List[str]]:
        """Dump one tokenized batch (decoded text + token shapes) to json for debugging."""
        UpperCAmelCase_ : Optional[Any] = {
            k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(_UpperCamelCase , Path(self.output_dir ) / 'text_batch.json' )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
        UpperCAmelCase_ : Union[str, Any] = True
        return readable_batch

    def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase ) -> List[str]:
        """Forward pass through the wrapped model."""
        return self.model(_UpperCamelCase , **_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
        """Decode generated token ids to whitespace-stripped strings."""
        UpperCAmelCase_ : Dict = self.tokenizer.batch_decode(
            _UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
        return lmap(str.strip , _UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
        """Compute the (optionally label-smoothed) LM loss for one batch."""
        UpperCAmelCase_ : int = self.tokenizer.pad_token_id
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = batch['input_ids'], batch['attention_mask']
        UpperCAmelCase_ : int = batch['labels']
        if isinstance(self.model , _UpperCamelCase ):
            UpperCAmelCase_ : str = self.model._shift_right(_UpperCamelCase )
        else:
            UpperCAmelCase_ : str = shift_tokens_right(_UpperCamelCase , _UpperCamelCase )
        if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
            UpperCAmelCase_ : Any = decoder_input_ids
            self.save_readable_batch(_UpperCamelCase )
        UpperCAmelCase_ : List[Any] = self(_UpperCamelCase , attention_mask=_UpperCamelCase , decoder_input_ids=_UpperCamelCase , use_cache=_UpperCamelCase )
        UpperCAmelCase_ : Tuple = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            UpperCAmelCase_ : Any = nn.CrossEntropyLoss(ignore_index=_UpperCamelCase )
            assert lm_logits.shape[-1] == self.vocab_size
            UpperCAmelCase_ : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            UpperCAmelCase_ : Optional[Any] = nn.functional.log_softmax(_UpperCamelCase , dim=-1 )
            UpperCAmelCase_ , UpperCAmelCase_ : str = label_smoothed_nll_loss(
                _UpperCamelCase , _UpperCamelCase , self.hparams.label_smoothing , ignore_index=_UpperCamelCase )
        return (loss,)

    @property
    def __UpperCAmelCase ( self ) -> int:
        """Padding token id of the tokenizer."""
        return self.tokenizer.pad_token_id

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
        """Lightning training step: loss plus token/padding statistics for logging."""
        UpperCAmelCase_ : List[Any] = self._step(_UpperCamelCase )
        UpperCAmelCase_ : Optional[Any] = dict(zip(self.loss_names , _UpperCamelCase ) )
        # tokens per batch
        UpperCAmelCase_ : Tuple = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
        UpperCAmelCase_ : Dict = batch['input_ids'].shape[0]
        UpperCAmelCase_ : Optional[int] = batch['input_ids'].eq(self.pad ).sum()
        UpperCAmelCase_ : List[str] = batch['input_ids'].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
        """Lightning validation step: generate and score one batch."""
        return self._generative_step(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase="val" ) -> Dict:
        """Aggregate per-step metrics at epoch end and record them for callbacks."""
        self.step_count += 1
        UpperCAmelCase_ : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        UpperCAmelCase_ : Optional[Any] = losses['loss']
        UpperCAmelCase_ : Tuple = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        UpperCAmelCase_ : Tuple = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        UpperCAmelCase_ : torch.FloatTensor = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(_UpperCamelCase )
        UpperCAmelCase_ : str = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        UpperCAmelCase_ : Optional[int] = self.step_count
        self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path
        UpperCAmelCase_ : int = flatten_list([x['preds'] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
        """Score predictions against references with ROUGE."""
        return calculate_rouge(_UpperCamelCase , _UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> dict:
        """Generate for a batch and compute metrics plus per-example timing."""
        UpperCAmelCase_ : Dict = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        UpperCAmelCase_ : int = self.model.generate(
            batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=_UpperCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        UpperCAmelCase_ : Optional[Any] = (time.time() - ta) / batch['input_ids'].shape[0]
        UpperCAmelCase_ : List[str] = self.ids_to_clean_text(_UpperCamelCase )
        UpperCAmelCase_ : List[str] = self.ids_to_clean_text(batch['labels'] )
        UpperCAmelCase_ : str = self._step(_UpperCamelCase )
        UpperCAmelCase_ : Optional[int] = dict(zip(self.loss_names , _UpperCamelCase ) )
        UpperCAmelCase_ : Dict = self.calc_generative_metrics(_UpperCamelCase , _UpperCamelCase )
        UpperCAmelCase_ : Union[str, Any] = np.mean(lmap(_UpperCamelCase , _UpperCamelCase ) )
        base_metrics.update(gen_time=_UpperCamelCase , gen_len=_UpperCamelCase , preds=_UpperCamelCase , target=_UpperCamelCase , **_UpperCamelCase )
        return base_metrics

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
        """Lightning test step delegates to the generative step."""
        return self._generative_step(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
        """Test-epoch aggregation reuses the validation aggregation with a 'test' prefix."""
        return self.validation_epoch_end(_UpperCamelCase , prefix='test' )

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> SeqaSeqDataset:
        """Build the dataset for one split ('train' / 'val' / 'test')."""
        UpperCAmelCase_ : int = self.n_obs[type_path]
        UpperCAmelCase_ : List[str] = self.target_lens[type_path]
        UpperCAmelCase_ : List[str] = self.dataset_class(
            self.tokenizer , type_path=_UpperCamelCase , n_obs=_UpperCamelCase , max_target_length=_UpperCamelCase , **self.dataset_kwargs , )
        return dataset

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ) -> DataLoader:
        """Build a DataLoader, honoring the sortish / dynamic-batch samplers (train only)."""
        UpperCAmelCase_ : List[str] = self.get_dataset(_UpperCamelCase )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            UpperCAmelCase_ : Optional[Any] = dataset.make_sortish_sampler(_UpperCamelCase , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                _UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            UpperCAmelCase_ : str = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                _UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                _UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , )

    def __UpperCAmelCase ( self ) -> DataLoader:
        """Training dataloader."""
        UpperCAmelCase_ : List[Any] = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=_UpperCamelCase )
        return dataloader

    def __UpperCAmelCase ( self ) -> DataLoader:
        """Validation dataloader."""
        return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )

    def __UpperCAmelCase ( self ) -> DataLoader:
        """Test dataloader."""
        return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )

    @staticmethod
    def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
        """Register task-specific CLI arguments on the parser."""
        BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase )
        add_generic_args(_UpperCamelCase , _UpperCamelCase )
        parser.add_argument(
            '--max_source_length' , default=1_0_2_4 , type=_UpperCamelCase , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--max_target_length' , default=5_6 , type=_UpperCamelCase , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=1_4_2 , type=_UpperCamelCase , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=1_4_2 , type=_UpperCamelCase , help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument('--freeze_encoder' , action='store_true' )
        parser.add_argument('--freeze_embeds' , action='store_true' )
        parser.add_argument('--sortish_sampler' , action='store_true' , default=_UpperCamelCase )
        parser.add_argument('--overwrite_output_dir' , action='store_true' , default=_UpperCamelCase )
        parser.add_argument('--max_tokens_per_batch' , type=_UpperCamelCase , default=_UpperCamelCase )
        parser.add_argument('--logger_name' , type=_UpperCamelCase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
        parser.add_argument('--n_train' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='# examples. -1 means use all.' )
        parser.add_argument('--n_val' , type=_UpperCamelCase , default=5_0_0 , required=_UpperCamelCase , help='# examples. -1 means use all.' )
        parser.add_argument('--n_test' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=_UpperCamelCase , default='summarization' , required=_UpperCamelCase , help='# examples. -1 means use all.' )
        parser.add_argument('--label_smoothing' , type=_UpperCamelCase , default=0.0 , required=_UpperCamelCase )
        parser.add_argument('--src_lang' , type=_UpperCamelCase , default='' , required=_UpperCamelCase )
        parser.add_argument('--tgt_lang' , type=_UpperCamelCase , default='' , required=_UpperCamelCase )
        parser.add_argument('--eval_beams' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase )
        parser.add_argument(
            '--val_metric' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , choices=['bleu', 'rouge2', 'loss', None] )
        parser.add_argument('--eval_max_gen_length' , type=_UpperCamelCase , default=_UpperCamelCase , help='never generate more than n tokens' )
        parser.add_argument('--save_top_k' , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help='How many checkpoints to save' )
        parser.add_argument(
            '--early_stopping_patience' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will effect it.'
            ) , )
        return parser
class lowerCamelCase (_snake_case ):
    """Translation fine-tuning task: same training loop as the summarization
    module above, scored with BLEU instead of ROUGE.

    NOTE(review): the base class name ``_snake_case`` is not defined at module
    level, this class shadows the summarization class of the same name, and the
    class attributes below all rebind ``_snake_case`` -- identifiers look
    machine-mangled; confirm against the upstream finetune script.
    """
    _snake_case : int = '''translation'''
    _snake_case : Dict = ['''loss''']
    _snake_case : str = ['''bleu''']
    _snake_case : str = '''bleu'''

    def __init__(self, hparams, **kwargs) -> None:
        # Fix: the previous revision declared the positional parameter and the
        # **kwargs under the same name (a SyntaxError) and dropped the
        # language codes into throwaway locals.
        super().__init__(hparams, **kwargs)
        # presumably consumed by the dataset via self.dataset_kwargs -- TODO
        # confirm against the base class once its identifiers are restored.
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def __UpperCAmelCase(self, preds, target) -> dict:
        """Score generated ``preds`` against ``target`` references with BLEU."""
        return calculate_bleu(preds, target)
def lowercase__ ( __snake_case : Dict , __snake_case : List[Any]=None ):
    """Entry point: build the task module, train it, optionally run prediction.

    NOTE(review): identifiers here look machine-mangled -- both parameters
    share one name (a SyntaxError), results are bound to the throwaway name
    ``UpperCAmelCase_`` and later read back under their original names
    (``model``, ``trainer``, ``dataset``, ``checkpoints``), and the task
    classes are referenced as ``SummarizationModule`` / ``TranslationModule``
    although no classes with those names are defined in this module.  Confirm
    against the upstream transformers seq2seq ``finetune.py`` before running.
    """
    Path(args.output_dir ).mkdir(exist_ok=__snake_case )
    check_output_dir(__snake_case , expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            UpperCAmelCase_ : SummarizationModule = SummarizationModule(__snake_case )
        else:
            UpperCAmelCase_ : SummarizationModule = TranslationModule(__snake_case )
    UpperCAmelCase_ : List[str] = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith('/tmp' )
        or str(args.output_dir ).startswith('/var' )
    ):
        UpperCAmelCase_ : Dict = True # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        UpperCAmelCase_ : Optional[Any] = os.environ.get('WANDB_PROJECT' , __snake_case )
        UpperCAmelCase_ : int = WandbLogger(name=model.output_dir.name , project=__snake_case )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        UpperCAmelCase_ : str = WandbLogger(name=model.output_dir.name , project=F"hf_{dataset}" )
    if args.early_stopping_patience >= 0:
        UpperCAmelCase_ : Any = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
    else:
        UpperCAmelCase_ : Dict = False
    UpperCAmelCase_ : Dict = args.val_metric == 'loss'
    UpperCAmelCase_ : pl.Trainer = generic_train(
        __snake_case , __snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
            args.output_dir , model.val_metric , args.save_top_k , __snake_case ) , early_stopping_callback=__snake_case , logger=__snake_case , )
    pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
    if not args.do_predict:
        return model
    UpperCAmelCase_ : Tuple = ''
    # Pick the newest checkpoint, if any, for the final test pass.
    UpperCAmelCase_ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=__snake_case ) )
    if checkpoints:
        UpperCAmelCase_ : int = checkpoints[-1]
        UpperCAmelCase_ : Optional[Any] = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Fix: the previous revision bound every intermediate to the same throwaway
    # name and then referenced the undefined `parser` / `args` / `main`.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    # NOTE(review): class/function identifiers in this module look mangled --
    # `lowerCamelCase` is the translation task defined above (it shadows the
    # summarization task) and `lowercase__` is this script's main(); confirm
    # against the upstream finetune script.
    parser = lowerCamelCase.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    lowercase__(args)
| 29 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_A : List[Any] = logging.get_logger(__name__)
class a__ ( a_ ):
    r"""
    MobileViT-style image processor: optional shortest-edge resize, center
    crop, rescale to [0, 1], and an RGB->BGR channel flip (the pretrained
    checkpoints assume BGR input).

    NOTE(review): the original block gave every parameter the same name (a
    syntax error) and assigned every value to one throwaway local, so no
    instance attribute was ever set. Names below are restored from how the
    methods themselves use them (`self.do_resize`, `self.resize(...)`, ...).
    """

    __lowerCAmelCase = ["""pixel_values"""]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Checkpoint defaults: 224 shortest edge, 256x256 center crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image, size, resample=PIL.Image.BILINEAR, data_format=None, **kwargs):
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image, data_format=None):
        """Swap RGB <-> BGR channel order."""
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_center_crop=None,
        crop_size=None,
        do_flip_channel_order=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms to one image or a batch and return a BatchFeature."""
        # Per-call overrides fall back to the values configured in __init__.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image segmentation maps, optionally resized to target_sizes."""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 202 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( grid: list[list[int]] , row: int , col: int , visit: set ):
    '''Count all simple paths from (row, col) to the bottom-right cell of
    ``grid`` via depth-first search with backtracking.

    Cells containing 1 are obstacles; ``visit`` holds the cells on the
    current path so they are not revisited.

    NOTE(review): the original gave all four parameters the same name (a
    syntax error) and recursed via a nonexistent ``depth_first_search``;
    both are fixed here, with names restored from the body.
    '''
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: no path here.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the bottom-right goal cell: exactly one path found.
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += _lowerCAmelCase(grid, row + 1, col, visit)
    count += _lowerCAmelCase(grid, row - 1, col, visit)
    count += _lowerCAmelCase(grid, row, col + 1, visit)
    count += _lowerCAmelCase(grid, row, col - 1, visit)
    # Backtrack so other branches may pass through this cell.
    visit.remove((row, col))
    return count
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 157 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline

# Minimal inference script for a fine-tuned (e.g. DreamBooth) checkpoint.
# NOTE(review): variable names restored from usage (`pipe`, `prompt`, `image`
# were referenced but never bound in the original), and the nonexistent
# `torch.floataa` replaced by `torch.float16` (half precision for CUDA).
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 157 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( number: int , position: int ) -> int:
    """Set (to 1) the bit of ``number`` at ``position`` (0 = least significant).

    NOTE(review): parameter names restored; the original gave both parameters
    the same name, which is a syntax error.
    """
    return number | (1 << position)
def _lowerCAmelCase ( number: int , position: int ) -> int:
    """Clear (to 0) the bit of ``number`` at ``position`` (0 = least significant).

    NOTE(review): parameter names restored; the original gave both parameters
    the same name, which is a syntax error.
    """
    return number & ~(1 << position)
def _lowerCAmelCase ( number: int , position: int ) -> int:
    """Flip the bit of ``number`` at ``position`` (0 = least significant).

    NOTE(review): parameter names restored; the original gave both parameters
    the same name, which is a syntax error.
    """
    return number ^ (1 << position)
def _lowerCAmelCase ( number: int , position: int ) -> bool:
    """Return True iff the bit of ``number`` at ``position`` is set.

    NOTE(review): parameter names restored; the original gave both parameters
    the same name, which is a syntax error.
    """
    return ((number >> position) & 1) == 1
def _lowerCAmelCase ( number: int , position: int ) -> int:
    """Return the bit of ``number`` at ``position`` as an int (0 or 1).

    NOTE(review): parameter names restored; the original gave both parameters
    the same name, which is a syntax error.
    """
    return int((number & (1 << position)) != 0 )
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 78 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowercase ( magnitude , angle , radian_mode = False )-> list[float]:
    '''Resolve a polar force into Cartesian components ``[x, y]``.

    ``angle`` is interpreted in degrees unless ``radian_mode`` is True.

    NOTE(review): parameter names restored from the body (``magnitude`` was
    already referenced); the original gave all three parameters the same
    name, which is a syntax error.
    '''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def lowercase ( forces , location , eps = 10**-1 )-> bool:
    '''Check static equilibrium: the moments ``location x force`` must sum
    to (approximately) zero, within tolerance ``eps``.

    NOTE(review): parameter names restored; the original gave all three
    parameters the same name, which is a syntax error. The cross-product
    argument order (location first) is the torque convention r x F —
    confirm against the intended reference problems.
    '''
    # z-components of the 2-D cross products, one moment per force.
    moments = cross(location, forces)
    sum_moments = float(sum(moments))
    return abs(sum_moments) < eps
# NOTE(review): this self-test block is broken as written — `polar_force` and
# `in_static_equilibrium` are not defined anywhere in this file (both defs
# above are named `lowercase`), and the repeated `__lowercase` assignments
# look like mangled bindings of `forces` / `location`. Flagged for repair
# together with the function names above.
if __name__ == "__main__":
    # Test to check if it works
    __lowercase = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __lowercase = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 40 | 0 |
# Full unit name -> SI symbol.
# NOTE(review): both dictionaries were bound to the same mangled name in the
# original, so the function's references to UNIT_SYMBOL / METRIC_CONVERSION
# could never resolve; proper names are restored here.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
# Backward-compatible alias for the original module-level name (its final
# binding in the original was this second dictionary).
lowerCAmelCase_ = METRIC_CONVERSION


def lowerCamelCase_ ( value: float , from_type: str , to_type: str )-> float:
    """Convert a metric length ``value`` from ``from_type`` to ``to_type``.

    Accepts full unit names (singular or plural, any case) or SI symbols.
    Raises ValueError for unknown units.
    """
    # Normalize: lowercase, drop a trailing 's' (plural), map names to symbols.
    from_sanitized = from_type.lower().strip('s')
    to_sanitized = to_type.lower().strip('s')
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 260 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size ( features: Features )-> Optional[int]:
    """Pick a parquet row-group batch size capped by the heaviest feature type
    present (image / audio / binary), or return None when no cap applies.

    NOTE(review): renamed to match the ``get_writer_batch_size(...)`` call in
    the writer class below; the original isinstance checks compared the
    feature against itself — the ``Image``/``Audio``/``Value`` types are
    restored from this module's imports and the config constant names.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    # Walk the (possibly nested) feature tree, tightening the cap as we go.
    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class _lowerCAmelCase ( AbstractDatasetReader ):
    '''Dataset reader that builds a Dataset from parquet files via the
    packaged Parquet builder.

    NOTE(review): the original bound every value to a throwaway local and
    passed a mangled name to every call; parameter flow is restored from
    the signature annotations and the builder's keyword names. The base
    class is restored from this module's ``AbstractDatasetReader`` import,
    and the ``read`` method name follows the reader API — confirm against
    callers.
    '''

    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        num_proc=None,
        **kwargs,
    ):
        '''Store reader options and construct the Parquet builder.'''
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['parquet'][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        '''Return the dataset: streaming view, or downloaded/prepared split.'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class _lowerCAmelCase :
    '''Writes a Dataset to a parquet file or binary file object in batches.

    NOTE(review): instance attributes restored from how ``write``/``_write``
    read them; the original assigned every value to one throwaway local.
    '''

    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        '''Store the dataset, destination, and writer options.'''
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Fall back to a cap derived from the feature types (see helper above).
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self):
        '''Write the dataset and return the number of bytes written.'''
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATch_SIZE if False else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, 'wb+') as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs):
        '''Stream arrow batches of ``batch_size`` rows into a ParquetWriter.'''
        written = 0
        # Guard: a stray path_or_buf kwarg must not reach pq.ParquetWriter.
        _ = parquet_writer_kwargs.pop('path_or_buf', None)
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating parquet from Arrow format', ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None, )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
| 260 | 1 |
from __future__ import annotations
# Type alias for a search result: a list of (y, x) coordinates.
# NOTE(review): the original bound all three module-level values to one
# mangled name; `grid` and `delta` are restored from how the search classes
# below reference them, and the alias name follows that usage — confirm.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class _A :
    '''Grid node for greedy best-first search.

    Stores position, goal, path cost ``g_cost``, parent link, and an
    ``f_cost`` equal to the Manhattan-distance heuristic (greedy best-first
    orders nodes purely by the heuristic).

    NOTE(review): attribute and method names restored from usage — the
    original assigned every value to one throwaway local, so no attribute
    was ever set, and the method called as ``self.calculate_heuristic()``
    carried a mangled name.
    '''

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        '''Manhattan distance from this node to the goal.'''
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Ordering used when sorting the open list.
        return self.f_cost < other.f_cost
class _A :
    '''Greedy best-first search over the module-level ``grid`` using the
    four ``delta`` moves.

    NOTE(review): attribute, parameter, and method names restored from
    internal usage (``self.retrace_path`` / ``self.get_successors`` calls
    and the ``search()`` call in the main block). ``Node`` is referenced
    exactly as the original referenced it, even though the node class in
    this file carries the mangled name ``_A``.
    '''

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # Node(pos_x, pos_y, goal_x, goal_y, g_cost, parent); coordinates
        # arrive as (y, x) tuples, hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        '''Run the search; return the path, ``[start]`` on failure, or None.'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        '''Return in-bounds, unblocked neighbor nodes of ``parent``.'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors

    def retrace_path(self, node):
        '''Walk parent links back from ``node`` and return the (y, x) path.'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    # NOTE(review): names restored from usage (`init`/`goal`/`path` were
    # referenced but never bound in the original). `GreedyBestFirst` is
    # referenced exactly as the original referenced it, even though the
    # search class above carries the mangled name `_A`.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            # Mark the found path with 2 before re-printing the grid.
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
| 49 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid = "isbn/0140328726"):
    """Fetch a JSON record from Open Library for the given olid path
    (e.g. ``"isbn/0140328726"`` or ``"authors/OL34184A"``).

    Raises ValueError when the olid is not of the form ``prefix/id``.

    NOTE(review): the function and parameter names are restored — the body
    already referenced ``olid``, and the summarizer/main block below call
    ``get_openlibrary_data`` by name.
    """
    new_olid = olid.strip().strip('/')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/') != 1:
        msg = f'{olid} is not a valid Open Library olid'
        raise ValueError(msg)
    return requests.get(f'https://openlibrary.org/{new_olid}.json').json()
def summarize_book(ol_book_data):
    """Reduce a raw Open Library book record to a small human-readable dict.

    Author keys are resolved to names via ``get_openlibrary_data``; any list
    values are joined into comma-separated strings.

    NOTE(review): the function name is restored from the call in the main
    block below, and the parameter name from the body's existing
    ``ol_book_data`` reference.
    """
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    # Each author entry only holds an olid key; fetch the display name.
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive ISBN lookup loop.
    # NOTE(review): `isbn` and `book_summary` were referenced but never
    # bound in the original; the names are restored from that usage.
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
            continue
        print(f'\nSearching Open Library for ISBN: {isbn}...\n')
        try:
            book_summary = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
            print('\n'.join(f'{key}: {value}' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'Sorry, there are no results for ISBN: {isbn}.')
| 49 | 1 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds small BertGeneration configs/inputs and runs model checks.

    NOTE(review): the class name is restored from the ``setUp`` call in the
    test class below; attribute, parameter, and method names are restored
    from how that class and this one use them — the original bound every
    value to one throwaway local and gave all methods the same name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,  # encoder by default; flipped in the decoder setup below
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Like prepare_config_and_inputs, plus cross-attention inputs."""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for BertGeneration encoder/decoder.

    NOTE(review): mixin bases, the mixin-required attribute names
    (``all_model_classes`` etc.), and the ``test_*`` method names are
    restored — the originals were mangled, so unittest would never have
    collected these tests.
    """

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"  # exercise the bert model-type path
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class lowerCamelCase__(unittest.TestCase):
    """Slow integration test: encoder hidden states against reference values.

    NOTE(review): locals and the ``test_*`` method name restored (the
    mangled original would never be collected by unittest); all numeric
    reference values are preserved byte-for-byte.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
@require_torch
class lowerCamelCase__(unittest.TestCase):
    """Slow integration test: decoder logits against reference values.

    NOTE(review): locals and the ``test_*`` method name restored (the
    mangled original would never be collected by unittest); all numeric
    reference values are preserved byte-for-byte.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 351 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
UpperCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__(Pipeline):
    """Video classification pipeline: samples frames with decord, runs the
    model, and returns top-k (score, label) predictions.

    NOTE(review): the decorator argument, base class, pipeline hook names
    (``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess``),
    and locals are restored from this module's imports and the Pipeline
    contract; ``np.intaa``/``idalabel`` were mangled spellings of
    ``np.int64``/``id2label``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, """decord""")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        """Classify one video (path/URL) or a list of them."""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        """Sample evenly spaced frames from the video and run the image processor."""
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith("""http://""") or video.startswith("""https://"""):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Convert logits to the top-k [{"score", "label"}] predictions."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 29 | 0 |
def __A ( __lowerCAmelCase )-> str:
"""simple docstring"""
if number > 0:
raise ValueError('input must be a negative integer' )
_UpperCAmelCase = len(bin(__lowerCAmelCase )[3:] )
_UpperCAmelCase = bin(abs(__lowerCAmelCase ) - (1 << binary_number_length) )[3:]
_UpperCAmelCase = (
(
'1'
+ '0' * (binary_number_length - len(__lowerCAmelCase ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
    # Run any doctest examples embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 39 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImgaImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (tiny-model) tests for StableDiffusionControlNetImgaImgPipeline with one ControlNet.

    NOTE(review): the original collapsed the mixin base classes, every class attribute
    and every method name into placeholder identifiers (``snake_case_``, ``_lowercase``,
    ``lowercase__``) while other code still read the real names (``self.pipeline_class``,
    ``self.get_dummy_components`` ...). The intended names are restored from those reads
    and the file's imports.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Return tiny, deterministically seeded pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        # clip_sample/set_alpha_to_one were mangled to an undefined name; the standard
        # fast-test configuration uses False for both -- TODO confirm against upstream.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs (prompt, init image and control image) for ``device``."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # "np.uinta" was a mangled np.uint8 (numpy has no "uinta" dtype).
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


# Backward-compatible alias for the original (collapsed) class name.
UpperCAmelCase = ControlNetImgaImgPipelineFastTests
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast tests for StableDiffusionControlNetImgaImgPipeline driven by two ControlNets.

    NOTE(review): base classes, attributes, method names and several literals were
    collapsed to placeholder identifiers in the original; the intended names/literals
    are restored from the surviving reads (e.g. ``self.pipeline_class``,
    ``self.get_dummy_components``) and the file's imports.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Tiny seeded components with two ControlNets wrapped in a MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Give the zero-initialised ControlNet projection layers non-trivial weights
            # so each ControlNet measurably influences the output in these tests.
            # "torch.nn.Convad" was a mangled torch.nn.Conv2d.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlneta = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlneta.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnetb = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnetb.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlneta, controlnetb])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs with one control image per ControlNet."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # "np.uinta" was a mangled np.uint8.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control-guidance windows must all change the generated image."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


# Backward-compatible alias for the original (collapsed) class name.
UpperCAmelCase = StableDiffusionMultiControlNetPipelineFastTests
@slow
@require_torch_gpu
class ControlNetImgaImgPipelineSlowTests(unittest.TestCase):
    """GPU integration test for the canny-ControlNet image-to-image pipeline.

    NOTE(review): the original collapsed the class and method names to placeholders, so
    ``tearDown`` was never invoked by unittest and the test never collected; the
    intended names are restored (``super().tearDown()`` in the body grounds the former).
    """

    def tearDown(self):
        # Free GPU/CPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))
        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2


# Backward-compatible alias for the original (collapsed) class name.
UpperCAmelCase = ControlNetImgaImgPipelineSlowTests
| 70 | 0 |
import numpy as np
# 5x5 Polybius square over the lowercase alphabet; "i" and "j" share a cell,
# so "j" is mapped to "i" before encoding.
lowercase_ = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]
class SCREAMING_SNAKE_CASE__ :
    """Bifid-style Polybius-square cipher over a 5x5 lowercase alphabet ("j" folded into "i").

    Fixes to the original: ``self.SQUARE`` was never assigned (the array was bound to
    an undefined placeholder name, raising NameError on first use), all four methods
    shared one collapsed name so only the last survived on the class, and ``decode``
    discarded the result of ``message.replace(" ", "")``.
    """

    def __init__(self):
        # The square is embedded directly so the class does not depend on the
        # module-level table and can be used in isolation.
        self.SQUARE = np.array(
            [
                ["a", "b", "c", "d", "e"],
                ["f", "g", "h", "i", "k"],
                ["l", "m", "n", "o", "p"],
                ["q", "r", "s", "t", "u"],
                ["v", "w", "x", "y", "z"],
            ]
        )

    def letter_to_numbers(self, letter):
        """Return the 1-based ``[row, column]`` of ``letter`` inside the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1, index2):
        """Return the letter stored at the 1-based position (index1, index2)."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message):
        """Encode ``message``: rows and columns are collected separately, then the
        flattened (rows-then-columns) sequence is re-paired into letters."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message):
        """Invert :meth:`encode`: interleaved coordinates are reshaped back into the
        rows/columns layout and read column-wise."""
        message = message.lower()
        # Bug fix: the original called replace() without keeping the result.
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
| 20 | from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Repository root; the script is intended to be run from the repo root (see comment above).
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    'Assert',
    'AssignVariableOp',
    'EmptyTensorList',
    'MergeV2Checkpoints',
    'ReadVariableOp',
    'ResourceGather',
    'RestoreV2',
    'SaveV2',
    'ShardedFilename',
    'StatefulPartitionedCall',
    'StaticRegexFullMatch',
    'VarHandleOp',
]
# Backward-compatible alias: the original bound both constants to this single name,
# leaving it pointing at the ops list (the later assignment).
lowerCAmelCase__ = INTERNAL_OPS
def _UpperCAmelCase(saved_model_path, strict, opset):
    """Check that every op in a TF SavedModel is exportable with the given ONNX opset.

    Fixes to the original: the three parameters shared one name (SyntaxError), every
    intermediate was bound to a placeholder while the real names were read (NameError),
    the incompatible-op loop appended the wrong variable, and the strict-mode raise
    concatenated a str with a list (TypeError).

    Args:
        saved_model_path: path to the SavedModel ``.pb`` file.
        strict: raise instead of printing when incompatible ops are found.
        opset: highest ONNX opset whose op tables should be considered.
    """
    saved_model = SavedModel()
    onnx_ops = []
    # Collect every ONNX-exportable op name up to (and including) the requested opset.
    # "." is the repo root -- the script is meant to be run from there.
    with open(os.path.join(".", "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


# Readable alias matching the name used by the CLI entry point below.
onnx_compliancy = _UpperCAmelCase
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
    parser.add_argument(
        '--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
    )
    parser.add_argument(
        '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
    )
    parser.add_argument(
        '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
    )
    # Bug fix: the parsed namespace was bound to a placeholder but read as ``args``.
    args = parser.parse_args()
    if args.framework == "onnx":
        # Call the compliancy checker defined above (its original name was collapsed).
        _UpperCAmelCase(args.saved_model_path, args.strict, args.opset)
| 11 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    """Unit tests for GenerationConfig save/load, model-config import and kwarg handling.

    NOTE(review): every method in the original was named ``_lowerCamelCase`` (so only
    the last one survived on the class) and several ``True``/``False``/``None`` literals
    were replaced by undefined placeholder names; the intended test names and literals
    are restored here.
    """

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value


# Backward-compatible alias for the original (collapsed) class name.
lowerCAmelCase__ = GenerationConfigTest
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging-hub round-trip tests for GenerationConfig.push_to_hub.

    NOTE(review): the original reused the previous class's collapsed name (shadowing
    it) and collapsed every method name; the intended setUpClass/tearDownClass and
    test names are restored so unittest actually runs them.
    """

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))


# Backward-compatible alias for the original (collapsed) class name.
lowerCAmelCase__ = ConfigPushToHubTester
| 11 | 1 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """Load a ``Dataset`` from a SQL query or table via the ``Sql`` builder.

    NOTE(review): the original used an undefined placeholder as the base class,
    declared all six parameters with one duplicate name (SyntaxError), and never
    assigned ``self.builder``; the intended names are restored from the visible reads
    and the imported ``AbstractDatasetInputStream``.
    """

    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        # Builder that materialises the SQL result as Arrow data.
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Download/prepare the data and return it as a ``train``-split Dataset."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


# Backward-compatible alias for the original (collapsed) class name.
__lowerCAmelCase = SqlDatasetReader
class SqlDatasetWriter:
    """Write a ``Dataset`` to a SQL table through pandas ``DataFrame.to_sql``.

    NOTE(review): the original declared all constructor parameters with one duplicate
    name (SyntaxError) and bound every attribute to a placeholder while the methods
    read ``self.dataset``/``self.name``/... (NameError); the intended names are
    restored from those reads.
    """

    def __init__(
        self,
        dataset,
        name,
        con,
        batch_size=None,
        num_proc=None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name  # target SQL table name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Write the whole dataset and return the number of rows written."""
        # "sql" and "con" must not be forwarded to DataFrame.to_sql; drop them.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch of rows [offset, offset+batch_size) and return its row count."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later batches must append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Write batches sequentially, or through a multiprocessing pool when requested."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written


# Backward-compatible alias for the original (collapsed) class name.
__lowerCAmelCase = SqlDatasetWriter
| 352 |
def lowerCamelCase_(number: int) -> str:
    """Return the two's-complement binary representation of a non-positive integer.

    The width used is one bit more than the minimal binary width of ``abs(number)``,
    so the sign bit is always explicit.

    >>> lowerCamelCase_(0)
    '0b0'
    >>> lowerCamelCase_(-1)
    '0b11'
    >>> lowerCamelCase_(-5)
    '0b1011'

    Raises:
        ValueError: if ``number`` is positive.
    """
    # The original parameter was renamed while the body still used `number`;
    # the parameter name is restored to match the body.
    if number > 0:
        raise ValueError("input must be a negative integer")
    # Width of the magnitude: bin(-5) == '-0b101' -> '101'.
    binary_number_length = len(bin(number)[3:])
    # abs(n) - 2**width is negative; its magnitude bits are the complement bits.
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 279 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
# The obfuscation assigned every constant below to the same `_lowercase` name,
# clobbering each one; real names are restored to match the references inside
# the conversion functions (`logger`, `new_layer_name_dict`, `REMOTE_MODEL_PATHS`,
# `default_cache_dir`, `CACHE_DIR`).
logger = logging.get_logger(__name__)

set_seed(770)

# Mapping from suno/bark checkpoint parameter-name fragments to HF Bark names.
new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

# Hub locations of the original suno/bark checkpoints, keyed by model type
# (with an optional "_small" suffix).
REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
# Checkpoints are cached under $XDG_CACHE_HOME/suno/bark_v0 (or ~/.cache/...).
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path for a suno/bark checkpoint.

    Args:
        model_type: one of "text", "coarse" or "fine".
        use_small: whether to use the small checkpoint variant.
    """
    # Original had two parameters with the same name (a SyntaxError) while the
    # body referenced `model_type`/`use_small`; the signature is restored.
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    """Download one checkpoint file from the Hub into the local cache directory.

    Args:
        from_hf_path: Hub repo id (e.g. "suno/bark").
        file_name: name of the checkpoint file inside the repo.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    """Load a suno/bark checkpoint and convert it into the matching HF Bark model.

    Args:
        ckpt_path: local path of the original checkpoint (downloaded if missing).
        device: torch device to map/move the model to.
        use_small: whether this is the small checkpoint variant.
        model_type: "text", "coarse" or "fine".

    Returns:
        The converted model in eval mode, on ``device``.

    Raises:
        NotImplementedError: for an unknown ``model_type``.
        ValueError: if the checkpoint and model state dicts disagree on keys.
    """
    # Select the HF classes matching the sub-model being converted.
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack: older checkpoints store a single `vocab_size` instead of
    # separate input/output vocab sizes.
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config

    state_dict = checkpoint["model"]
    # fixup checkpoint: drop the torch.compile prefix and rename GPT-style layers.
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix):]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    # .attn.bias buffers are recomputed, so they are allowed to differ.
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")

    # NOTE(review): strict=False tolerates the .attn.bias buffers excluded above.
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    """Convert one Bark sub-model, verify it against the original, and save it.

    Function and parameter names are restored from the ``__main__`` call site
    (the obfuscated file defined this as another ``_snake_case`` shadow).

    Args:
        pytorch_dump_folder_path: output directory for the converted model.
        use_small: convert the small checkpoint variant.
        model_type: "text", "coarse" or "fine".
    """
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    """Assemble the three converted sub-models plus Encodec into one BarkModel and save it.

    Args:
        semantic_path / coarse_path / fine_path: folders with the converted sub-models.
        append_text: sub-folder name appended to ``folder_path`` for the output.
        hub_path: Hub repo id used when pushing the assembled model.
        folder_path: base output folder.
    """
    # Original signature reused one parameter name six times (a SyntaxError);
    # names restored from the upstream Bark conversion script.
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    # NOTE(review): push_to_hub was obfuscated away; upstream pushes to `hub_path`.
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    # `parser`/`args` were both assigned to `_lowercase` by the obfuscation while
    # being referenced by their real names below; the bindings are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('model_type', type=str, help='text, coarse or fine.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the obfuscation assigned both the module logger and the
# pretrained-config archive map to the same `_lowercase` name, so the second
# assignment clobbers the logger (upstream names: `logger` and
# `ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`) — flag for a file-wide rename.
_lowercase = logging.get_logger(__name__)
# Map of pretrained RoFormer checkpoints to their hosted config.json files.
_lowercase = {
    '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
    '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
    '''junnyu/roformer_chinese_char_small''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
    ),
    '''junnyu/roformer_chinese_char_base''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_discriminator''': (
        '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_generator''': (
        '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration class for RoFormer models (rotary position embeddings).

    The base class is restored to :class:`PretrainedConfig` (imported above);
    the obfuscated base ``_lowercase`` pointed at a module-level dict. The
    ``__init__`` parameter names are restored from the attribute assignments
    in the body (the obfuscated signature reused ``A_`` for every parameter,
    which is a SyntaxError).
    """

    # Model-type identifier used by the Auto* machinery.
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Fall back to hidden_size when no separate embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer.

    Renamed from the obfuscated ``lowerCAmelCase_`` (which shadowed the config
    class above) and re-based on :class:`OnnxConfig`, which is imported at the
    top of this module.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # NOTE(review): the source re-assigned the axis map unconditionally here,
        # making the multiple-choice branch dead; preserved verbatim to keep
        # behavior identical — confirm against upstream before removing.
        dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
# Module-level logger for this deprecation shim.
_a : List[Any]= logging.get_logger(__name__)
class UpperCamelCase(SegformerImageProcessor):
    """Deprecated alias of :class:`SegformerImageProcessor`.

    Kept for backward compatibility; instantiation emits a ``FutureWarning``.
    The base class is restored from the import above (the obfuscated base
    ``lowercase`` was undefined), and the duplicated ``*_A``/``**_A``
    parameter names (a SyntaxError) are fixed.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 369 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
# Module-level logger for this deprecation shim.
_a : Tuple= logging.get_logger(__name__)
class UpperCamelCase(VideoMAEImageProcessor):
    """Deprecated alias of :class:`VideoMAEImageProcessor`.

    Kept for backward compatibility; instantiation emits a ``FutureWarning``.
    The base class is restored from the import above (the obfuscated base
    ``lowercase`` was undefined), and the duplicated ``*_A``/``**_A``
    parameter names (a SyntaxError) are fixed.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 95 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Helper that holds image-processor settings for the Yolos tests.

    Renamed from the obfuscated ``UpperCAmelCase`` to match the
    ``YolosImageProcessingTester(self)`` call site in the test class below.
    Broken tuple unpackings (``w, h = ...``) and undefined-name references
    from the obfuscation are restored; the mutable list defaults for
    ``image_mean``/``image_std`` are replaced by a ``None`` sentinel.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default size mirrors the YolosImageProcessor defaults.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        self.image_std = [0.5, 0.5, 0.5] if image_std is None else image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to resize to.

        For a batch, each image's expected size is computed individually and
        the per-axis maxima are returned (images are padded up to them).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: shape is (C, H, W) — TODO confirm
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Test suite for :class:`YolosImageProcessor`.

    The mixin base (obfuscated to the undefined ``lowercase_``) is restored to
    ``ImageProcessingSavingTestMixin`` from the imports above; the attribute
    ``image_processing_class`` and the method/property names are restored to
    match their references inside this class.
    """

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 187 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # Keep the name importable even without sentencepiece installed.
    FNetTokenizer = None

# The obfuscation assigned every constant below to the same `__snake_case`
# name; real names are restored to match their references in the tokenizer
# class (`VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`, etc.) and `logger`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/fnet-base': 512,
    'google/fnet-large': 512,
}

# Sentencepiece word-start marker.
SPIECE_UNDERLINE = '▁'
class lowerCamelCase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) FNet tokenizer.

    The base class is restored to :class:`PreTrainedTokenizerFast` (imported
    above); the five class attributes (all obfuscated to ``__snake_case``,
    clobbering each other) and the duplicated ``__init__`` parameter names
    (a SyntaxError) are restored from the values they are assigned from and
    the Hugging Face tokenizer conventions.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        # NOTE(review): lstrip/rstrip/normalized flags follow the upstream FNet
        # tokenizer (lstrip=True, rstrip=False, normalized=False) — confirm.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created if the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 134 | 0 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# These flags are read as `MODEL_TYPE` / `LOAD_DENSE_INDEX` by the loader
# functions below; the obfuscation had bound them to throwaway names.
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the retriever (RetriBERT) and the answer generator (BART or T5).

    Returns ``(qar_tokenizer, qar_model, sas_tokenizer, sas_model)``; the
    retriever pair is ``(None, None)`` when ``LOAD_DENSE_INDEX`` is off.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passage dataset + dense FAISS index and an ES client.

    Returns ``(wiki40b_passages, wiki40b_gpu_index_flat, es_client)``; the first
    two are ``None`` when ``LOAD_DENSE_INDEX`` is off.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        # Memory-mapped dense passage representations (num_passages x 128).
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a FAISS index over its question embeddings."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    # Memory-mapped question representations (num_questions x 128).
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Bind the loaded resources to the module-level names the functions below read
# (the obfuscation had unpacked them into throwaway names).
passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Optional[Any]=10 ) -> Optional[int]:
_a = embed_questions_for_retrieval([question] , lowercase , lowercase )
_a , _a = eli5_train_q_index.search(lowercase , lowercase )
_a = [elia_train[int(lowercase )] for i in I[0]]
return nn_examples
def _lowerCamelCase ( lowercase : str , lowercase : Dict="wiki40b" , lowercase : Any="dense" , lowercase : Optional[Any]=10 ) -> Tuple:
if source == "none":
_a , _a = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
_a , _a = query_qa_dense_index(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
else:
_a , _a = query_es_index(
lowercase , lowercase , index_name="english_wiki40b_snippets_100w" , n_results=lowercase , )
_a = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
_a = "question: {} context: {}".format(lowercase , lowercase )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda tensor: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda tokenizer: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one long-form answer for `question_doc` with the seq2seq model.

    Cached by Streamlit; tensors and tokenizers are excluded from the cache key
    via `hash_funcs`. Returns `(answer, support_list)` — `support_list` is read
    from the module global set by the retrieval step (original app quirk).
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,  # NOTE(review): mangled in source; None disables top-k — confirm against upstream app
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
# ---------------------------------------------------------------------------
# Streamlit UI. Every binding below was mangled to `lowerCAmelCase_` in the
# original while each consumer read the real name; bindings are restored
# one-to-one from the visible use sites.
# ---------------------------------------------------------------------------
st.title('Long Form Question Answering with ELI5')

# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    'Answer the question',
    'View the retrieved document only',
    'View the most similar ELI5 question and answer',
    'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
        '',
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        '',
        ['Show full text of passages', 'Show passage section titles'],
        index=0,
    )
    show_passages = show_type == 'Show full text of passages'
else:
    # Defaults when the options panel is collapsed: show everything.
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'

# Generation defaults, possibly overridden by the sidebar widgets below.
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 2_56
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    '<MY QUESTION>',
    'How do people make chocolate?',
    'Why do we get a fever when we are sick?',
    'How can different animals perceive different colors?',
    'What is natural language processing?',
    'What\'s the best way to treat a sunburn?',
    'What exactly are vitamins ?',
    'How does nuclear energy provide electricity?',
    'What\'s the difference between viruses and bacteria?',
    'Why are flutes classified as woodwinds when most of them are made out of metal ?',
    'Why do people like drinking coffee even though it tastes so bad?',
    'What happens when wine ages? How does it make the wine taste better?',
    'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
    'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
    'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
    'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s

if st.button('Show me!'):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, keeping the top 10.
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == 'sampled'),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown('### The model generated answer is:')
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
        for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
                    ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
                )
            st.markdown(
                '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
        )
        # Keep the first answer plus any answer scored above 2.
        answers_st = [
            '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
            for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
            if i == 0 or sc > 2
        ]
        st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))

disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 346 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (ProcessorMixin ):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor and a LayoutXLM tokenizer into a
    single processor.

    The image processor (optionally) applies OCR to get words and normalized bounding boxes; the tokenizer then
    turns these into token-level ``input_ids``, ``attention_mask``, ``token_type_ids`` and ``bbox``.

    NOTE(review): the original block was machine-mangled (base class, class attributes, parameter and method names
    were all collapsed onto placeholder identifiers, which does not parse); names are restored from the surviving
    references, e.g. ``self.image_processor`` / ``self.tokenizer`` and the deprecation messages.
    """

    # Names consumed by ProcessorMixin to wire up the two sub-components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the deprecated alias of `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare image(s) + text for the model; see the tokenizer for the text arguments."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 346 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the Perceiver model package: names are only imported
# when first accessed. NOTE(review): the original collapsed every assignment onto
# one placeholder name, leaving `_import_structure` undefined and never installing
# the lazy module — restored below.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
# The distributed-checkpoint helpers only exist on PyTorch versions that ship
# the FSDP state-dict APIs, so guard the imports behind a version check.
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

# Bind the module logger under the name the functions below read (`logger`);
# the original bound it to a throwaway obfuscated name, leaving `logger` undefined.
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's weights under `output_dir` according to
    `fsdp_plugin.state_dict_type` (full, per-rank local, or sharded state dict).

    NOTE(review): the original had five parameters sharing one name and passed
    parameters where the computed locals were intended; bindings restored from
    the visible dataflow.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # Only rank 0 holds the gathered full state dict, so only it writes.
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            # Every rank writes its own shard file.
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load FSDP model weights from `input_dir`, mirroring `save_fsdp_model`.

    NOTE(review): parameter/local names restored from the visible dataflow of
    the machine-mangled original.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # With rank0-only full state dicts, non-zero ranks rely on
            # `sync_module_states` to receive the weights after rank 0 loads them.
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save an FSDP optimizer state dict under `output_dir` (full on rank 0,
    or sharded via torch.distributed.checkpoint otherwise).

    NOTE(review): parameter/local names restored from the visible dataflow of
    the machine-mangled original.
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            # Only rank 0 holds the gathered optimizer state.
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load FSDP optimizer state from `input_dir`, mirroring `save_fsdp_optimizer`,
    then flatten it for the wrapped model and restore it into `optimizer`.

    NOTE(review): parameter/local names restored from the visible dataflow of
    the machine-mangled original.
    """
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        # NOTE(review): argument order mangled in source; this matches the
        # (optim_state_dict, model, optim) signature — confirm against the
        # installed torch version's FSDP.optim_state_dict_to_load.
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
| 327 | 1 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds tiny MRA configs/inputs and checks model output shapes.

    NOTE(review): the original block was machine-mangled (one shared name for
    every method and every parameter, which does not parse); the class and
    method names below are restored from the sibling test class that calls them
    (`MraModelTester(self)`, `prepare_config_and_inputs`, `create_and_check_*`).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocabulary than the tiny test default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        # NOTE(review): the mangled source shows only `... = True` here; cross
        # attention is what decoder checks enable — confirm against upstream.
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand each input to (batch, num_choices, seq_len).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """Common model tests for MRA, driven by MraModelTester.

    Renamed: the mangled source gave this class and the integration class below the
    same name, so one clobbered the other and its tests never ran. All method names
    are restored to the unittest `test_*` / mixin conventions; the original bodies
    referenced the never-defined `__snake_case`.
    NOTE(review): base class ModelTesterMixin and MraConfig are presumably imported
    at module level (not visible in this chunk) — confirm.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # NOTE(review): the mangled source clobbered all flags into one attribute; names
    # restored from the upstream MRA test — confirm order/meaning upstream.
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False  # MRA does not output attentions
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        # NOTE(review): config_class restored as MraConfig — lost in source mangling.
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the model check under each position-embedding flavour.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """Slow integration checks of released MRA checkpoints against recorded logits.

    Renamed to avoid clobbering the common-test class above (both shared one mangled
    name); locals restored — the original called `model(...)` after assigning the
    model to `__a`, and passed the never-defined `__snake_case` as inputs.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 357 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ (TokenizerTesterMixin, unittest.TestCase):
    """PhoBERT tokenizer tests on a tiny hand-written BPE vocab.

    Fixed from the mangled source: the base class `__lowercase` was undefined
    (TokenizerTesterMixin is imported above), setUp referenced undefined
    `snake_case_`/`vocab_tokens`, and all method/attribute names were clobbered.
    NOTE(review): `tokenizer_class`/`test_rust_tokenizer` names follow the
    TokenizerTesterMixin convention — confirm.
    """

    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l à</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'Tôi là VinAI Research'
        output_text = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'Tôi là VinAI Research'
        bpe_tokens = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 90 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so module-level references to Image.open (see the batch
        test below) still resolve when vision extras are unavailable; the tests
        themselves are gated by @require_vision."""

        @staticmethod
        def open(*args, **kwargs):
            # Intentionally a no-op — never reached when vision tests are skipped.
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    """Tests for the object-detection pipeline on tiny and released DETR/LayoutLM models.

    Restored from the mangled source: duplicate `SCREAMING_SNAKE_CASE_` parameters were
    a SyntaxError, locals such as `outputs`/`object_detector` were referenced but never
    bound, all methods shared one clobbered name, and the class name collided with the
    vision stub above.
    NOTE(review): `model_mapping` and the get_test_pipeline/run_pipeline_test names
    follow the pipeline-test mixin convention — confirm against test_pipelines_common.
    """

    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build an ObjectDetectionPipeline plus one sample image for the common runner."""
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        """Run single and batched inputs; type-check every detection dict."""
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')

        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip('Object detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
                [
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                    {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

        outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
                [
                    {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
                    {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
                    {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
                    {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                    {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'

        object_detector = pipeline('object-detection', model=model_id)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
                {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993

        object_detector = pipeline('object-detection', model=model_id, threshold=threshold)

        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png'
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
                {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
            ],
        )
| 259 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# NOTE(review): presumably seeds/configures torch for fully deterministic test runs
# (imported from diffusers.utils.testing_utils above) — confirm its exact effect.
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU sanity tests for DDIMPipeline using a tiny dummy UNet.

    Fixed from the mangled source: the base class `lowercase` was undefined
    (PipelineTesterMixin is imported above), the class attributes were all assigned
    to one clobbered name (so `self.pipeline_class` at runtime failed), and
    get_dummy_inputs had duplicate parameter names (SyntaxError). Renamed to avoid
    clobbering the integration class below.
    """

    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): the original flag name was lost in source mangling; upstream
    # diffusers sets test_cpu_offload = False here — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Tiny UNet + default DDIM scheduler for cheap CPU runs."""
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCamelCase :int = '''google/ddpm-cifar10-32'''
UpperCamelCase :Union[str, Any] = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = DDIMScheduler()
UpperCamelCase :Tuple = DDIMPipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
ddim.to(SCREAMING_SNAKE_CASE_ )
ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Dict = torch.manual_seed(0 )
UpperCamelCase :Optional[int] = ddim(generator=SCREAMING_SNAKE_CASE_ , eta=0.0 , output_type='''numpy''' ).images
UpperCamelCase :int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase :Tuple = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :Optional[Any] = '''google/ddpm-ema-bedroom-256'''
UpperCamelCase :Any = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :int = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = DDIMPipeline(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
ddpm.to(SCREAMING_SNAKE_CASE_ )
ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = torch.manual_seed(0 )
UpperCamelCase :Optional[int] = ddpm(generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
UpperCamelCase :Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase :Dict = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 259 | 1 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
# Names restored: the decorator below references _DESCRIPTION / _KWARGS_DESCRIPTION
# and _info() references _CITATION, but the mangled source assigned all three
# strings to one repeatedly-clobbered name. String contents are unchanged.
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],        # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]                         # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]                                           # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE (datasets.Metric):
    """BLEU metric wrapping the tensorflow/nmt reference implementation.

    Fixed from the mangled source: the duplicate `__a` parameters of _compute were a
    SyntaxError, the six-way unpack assigned every element to one name while the
    return dict read six undefined names, and the method names are restored to the
    `_info`/`_compute` hooks that datasets.Metric invokes.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Return the corpus BLEU score plus its component statistics."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 346 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Names restored: the tokenizer class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, MAX_MODEL_INPUT_SIZES, MUSTC_LANGS and LANGUAGES,
# but the mangled source assigned everything to one repeatedly-clobbered name.
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 10_24,
}

# Language set of the MuST-C corpus; keyed through LANGUAGES by the lang_codes arg.
MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

LANGUAGES = {'mustc': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE (PreTrainedTokenizer):
    """Speech2Text-style tokenizer: a JSON vocab plus a SentencePiece model.

    Fixed from the mangled source: the base class `lowerCamelCase_` was undefined
    (PreTrainedTokenizer is imported above), duplicate `__a` parameters were
    SyntaxErrors, `@tgt_lang.setter` decorated a property that was not named
    `tgt_lang`, and `_a`/`spm`/`state` locals were clobbered or undefined.
    NOTE(review): attribute/method names are restored to the PreTrainedTokenizer
    contract — confirm against the upstream speech_to_text tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    # Language-code token ids prepended to every encoded sequence.
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}
            # NOTE(review): the mangled source lost this attribute name; upstream
            # registers the language tokens as additional special tokens — confirm.
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset prefix_tokens to the id of the target-language code token."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Decode sub-tokens with SentencePiece, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; reloaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, str]:
        """Write vocab.json and copy/serialize the SentencePiece model into save_directory."""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def _lowerCamelCase ( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """Create a SentencePiece processor and load the model file at *path*.

    The original declared ``lowercase`` twice, which is a SyntaxError; the
    parameters are renamed (callers pass both positionally).
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def _lowerCamelCase ( lowercase : str ) -> Union[Dict, List]:
with open(lowercase , "r" ) as f:
return json.load(lowercase )
def _lowerCamelCase ( lowercase : Any , lowercase : str ) -> None:
with open(lowercase , "w" ) as f:
json.dump(lowercase , lowercase , indent=2 )
| 346 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
    """Helper that builds a small BlipText config plus random inputs for tests.

    NOTE(review): anonymization damage throughout — ``__init__`` declares
    ``snake_case`` nineteen times (a SyntaxError), and the ``A_`` locals never
    reach ``self`` even though later methods read ``self.batch_size`` etc.
    Reconcile against the upstream TF-BLIP test file before running.
    """

    def __init__( self :Optional[Any] , snake_case :Dict , snake_case :List[str]=12 , snake_case :Tuple=7 , snake_case :Union[str, Any]=True , snake_case :Dict=True , snake_case :List[str]=True , snake_case :Optional[int]=99 , snake_case :Tuple=32 , snake_case :Optional[int]=32 , snake_case :List[str]=2 , snake_case :Dict=4 , snake_case :Dict=37 , snake_case :Optional[int]=0.1 , snake_case :int=0.1 , snake_case :Optional[Any]=512 , snake_case :Union[str, Any]=0.02 , snake_case :Tuple=0 , snake_case :Any=None , ):
        '''simple docstring'''
        A_ : Tuple = parent
        A_ : Union[str, Any] = batch_size
        A_ : List[Any] = seq_length
        A_ : List[str] = is_training
        A_ : Dict = use_input_mask
        A_ : List[str] = use_labels
        A_ : Union[str, Any] = vocab_size
        A_ : Union[str, Any] = hidden_size
        A_ : Optional[Any] = projection_dim
        A_ : Tuple = num_hidden_layers
        A_ : List[Any] = num_attention_heads
        A_ : List[str] = intermediate_size
        A_ : int = dropout
        A_ : List[str] = attention_dropout
        A_ : Optional[Any] = max_position_embeddings
        A_ : Union[str, Any] = initializer_range
        A_ : Tuple = scope
        A_ : Union[str, Any] = bos_token_id

    # Builds (config, input_ids, attention_mask); the mask forces at least one
    # attended and one padded position per row.
    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A_ : List[Any] = None
        if self.use_input_mask:
            A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            A_ : Union[str, Any] = input_mask.numpy()
            A_ , A_ : Dict = input_mask.shape
            A_ : List[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(snake_case ):
                A_ : Tuple = 1
                A_ : int = 0
        A_ : Union[str, Any] = self.get_config()
        return config, input_ids, tf.convert_to_tensor(snake_case )

    # A deliberately tiny BlipTextConfig so test models build fast.
    def SCREAMING_SNAKE_CASE ( self :List[str] ):
        '''simple docstring'''
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    # Runs the model with and without the attention mask and checks shapes.
    def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[str] , snake_case :int , snake_case :Tuple ):
        '''simple docstring'''
        A_ : Tuple = TFBlipTextModel(config=snake_case )
        A_ : Optional[Any] = model(snake_case , attention_mask=snake_case , training=snake_case )
        A_ : Any = model(snake_case , training=snake_case )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
        '''simple docstring'''
        A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ : int = config_and_inputs
        A_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __magic_name__ ( lowerCamelCase__ , unittest.TestCase ):
    """Model-test suite for TFBlipTextModel.

    NOTE(review): the four ``__UpperCamelCase`` class attributes and the
    repeated ``SCREAMING_SNAKE_CASE`` method names each shadow the previous
    binding — only the last of each survives; anonymization damage.
    """
    __UpperCamelCase = (TFBlipTextModel,) if is_tf_available() else ()
    __UpperCamelCase = False
    __UpperCamelCase = False
    __UpperCamelCase = False

    def SCREAMING_SNAKE_CASE ( self :str ):
        '''simple docstring'''
        A_ : str = BlipTextModelTester(self )
        A_ : Union[str, Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case )

    def SCREAMING_SNAKE_CASE ( self :Dict ):
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def SCREAMING_SNAKE_CASE ( self :List[Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
        '''simple docstring'''
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
        '''simple docstring'''
        pass

    @slow
    def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
        '''simple docstring'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : List[Any] = TFBlipTextModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )

    def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Union[str, Any]=True ):
        '''simple docstring'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=snake_case )
| 300 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger plus the map of pretrained Data2Vec-Vision config URLs.
# NOTE(review): both values are bound to the same anonymized name, so the
# URL map clobbers the logger — upstream uses two distinct names.
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
    '''facebook/data2vec-vision-base-ft''': (
        '''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
    ),
}
class __magic_name__ ( lowerCamelCase__ ):
    """Configuration for Data2Vec-Vision models.

    Fixes applied: the original ``__init__`` declared ``snake_case`` for every
    parameter (duplicate names are a SyntaxError) and bound the values to
    throwaway ``A_`` locals; parameter names are reconstructed from the names
    the body itself referenced, and values now land on ``self``.
    """

    __UpperCamelCase = '''data2vec-vision'''

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        # Transformer encoder hyper-parameters.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch-embedding / input geometry.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __magic_name__ ( lowerCamelCase__ ):
    """ONNX export configuration for Data2Vec-Vision.

    NOTE(review): both properties share the anonymized name
    ``SCREAMING_SNAKE_CASE``, so the second definition shadows the first
    (the input-axes property is lost) — upstream names them ``inputs`` and
    ``atol_for_validation``; confirm before relying on this class.
    """

    # Minimum ONNX opset this export targets.
    __UpperCamelCase = version.parse('''1.11''' )

    @property
    def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
        # Dynamic axes for the single image input.
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE ( self :Tuple ):
        # Absolute tolerance used when validating exported outputs.
        '''simple docstring'''
        return 1e-4
| 300 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
    """Tokenizer test suite for the Funnel tokenizers (slow + fast).

    Fix applied: a dataset column separator (``| 365``) was fused onto the
    final line, making the class a SyntaxError; it has been removed.
    NOTE(review): the four ``lowerCAmelCase__`` class attributes and the
    repeated ``_lowercase`` method names each shadow the previous binding,
    and several bodies read names (``vocab_tokens``, ``inputs``) that are
    never bound — anonymization damage; reconcile with the upstream file.
    """
    lowerCAmelCase__ = FunnelTokenizer
    lowerCAmelCase__ = FunnelTokenizerFast
    lowerCAmelCase__ = True
    lowerCAmelCase__ = True

    # Writes a tiny WordPiece vocab file for the tests to load.
    def _lowercase ( self: Any ):
        '''simple docstring'''
        super().setUp()
        _lowerCamelCase : Optional[int] = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        _lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def _lowercase ( self: str ,**__lowerCAmelCase: List[Any] ):
        '''simple docstring'''
        return FunnelTokenizer.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )

    def _lowercase ( self: int ,**__lowerCAmelCase: Optional[Any] ):
        '''simple docstring'''
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**__lowerCAmelCase )

    def _lowercase ( self: List[Any] ,__lowerCAmelCase: str ):
        '''simple docstring'''
        _lowerCamelCase : Union[str, Any] = "UNwant\u00E9d,running"
        _lowerCamelCase : Tuple = "unwanted, running"
        return input_text, output_text

    # Tokenization round-trip against the tiny vocab above.
    def _lowercase ( self: Dict ):
        '''simple docstring'''
        _lowerCamelCase : Optional[Any] = self.tokenizer_class(self.vocab_file )
        _lowerCamelCase : Any = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(__lowerCAmelCase ,["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) ,[7, 4, 5, 10, 8, 9] )

    # Funnel marks the leading <cls> with token type 2.
    def _lowercase ( self: Any ):
        '''simple docstring'''
        _lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
        for tokenizer in tokenizers:
            _lowerCamelCase : Union[str, Any] = tokenizer("UNwant\u00E9d,running" )
            _lowerCamelCase : str = len(inputs["input_ids"] ) - 1
            self.assertListEqual(inputs["token_type_ids"] ,[2] + [0] * sentence_len )
            _lowerCamelCase : Tuple = tokenizer("UNwant\u00E9d,running" ,"UNwant\u00E9d,running" )
            self.assertListEqual(inputs["token_type_ids"] ,[2] + [0] * sentence_len + [1] * sentence_len )
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
# Module-level logger (plain stdlib logging here, not transformers' wrapper).
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class A_ ( _a ):
    """Configuration for Masked-BERT (movement-pruning research config).

    Fixes applied: the original ``__init__`` declared ``__lowerCAmelCase``
    sixteen times (duplicate parameter names are a SyntaxError), bound all
    values to throwaway locals instead of ``self``, and the last line carried
    fused dataset-separator junk (``| 340 | 0 |``).  Parameter names are
    reconstructed from the names the original body referenced.
    """

    lowerCAmelCase__ = 'masked_bert'

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Movement-pruning specifics: how weights are pruned and how the
        # learned mask scores are initialized/scaled.
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger plus the map of pretrained ConvBERT config URLs.
# NOTE(review): both values are bound to the same anonymized name, so the
# URL map clobbers the logger — upstream uses two distinct names.
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
    '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
    '''YituTech/conv-bert-medium-small''': (
        '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
    ),
    '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
    """Configuration for ConvBERT models.

    Fixes applied: the original ``__init__`` declared ``lowercase_`` twenty
    times (duplicate parameter names are a SyntaxError) and bound values to
    throwaway ``lowercase__`` locals; parameter names are reconstructed from
    the names the body referenced, and the three extra defaults (1, 0, 2)
    are the pad/bos/eos token ids forwarded to ``super().__init__``.
    """

    __A : List[str] = "convbert"

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=768 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # ConvBERT-specific: span-based dynamic convolution parameters.
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class snake_case_ ( __A ):
    """ONNX export configuration for ConvBERT: declares the dynamic axes of
    the standard BERT-style inputs."""

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        # Fix: the original bound the axis dict to a throwaway local
        # (`lowercase__`) while the OrderedDict read the undefined name
        # `dynamic_axis`, raising NameError on every call.
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra `choice` dimension.
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
| 87 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ELECTRA subpackage: maps submodule names to
# the public symbols they export, extended only when the optional backend
# (tokenizers / torch / tf / flax) is importable.
# NOTE(review): every assignment below rebinds the single anonymized name
# `UpperCamelCase`, so the per-backend lists overwrite the dict instead of
# being inserted into `_import_structure[...]` as upstream does — damage.
UpperCamelCase = {
    '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
    '''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''ElectraTokenizerFast''']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ElectraForCausalLM''',
        '''ElectraForMaskedLM''',
        '''ElectraForMultipleChoice''',
        '''ElectraForPreTraining''',
        '''ElectraForQuestionAnswering''',
        '''ElectraForSequenceClassification''',
        '''ElectraForTokenClassification''',
        '''ElectraModel''',
        '''ElectraPreTrainedModel''',
        '''load_tf_weights_in_electra''',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFElectraForMaskedLM''',
        '''TFElectraForMultipleChoice''',
        '''TFElectraForPreTraining''',
        '''TFElectraForQuestionAnswering''',
        '''TFElectraForSequenceClassification''',
        '''TFElectraForTokenClassification''',
        '''TFElectraModel''',
        '''TFElectraPreTrainedModel''',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''FlaxElectraForCausalLM''',
        '''FlaxElectraForMaskedLM''',
        '''FlaxElectraForMultipleChoice''',
        '''FlaxElectraForPreTraining''',
        '''FlaxElectraForQuestionAnswering''',
        '''FlaxElectraForSequenceClassification''',
        '''FlaxElectraForTokenClassification''',
        '''FlaxElectraModel''',
        '''FlaxElectraPreTrainedModel''',
    ]
# Static-analysis branch: under TYPE_CHECKING the real imports run so IDEs
# and type checkers see concrete symbols; at runtime a _LazyModule defers
# backend imports until first attribute access.
# NOTE(review): upstream assigns ``sys.modules[__name__] = _LazyModule(...,
# _import_structure, ...)``; here the module is bound to a plain name and
# ``_import_structure`` is never defined — anonymization damage.
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )
else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( snake_case__ , unittest.TestCase):
    """Tokenizer test suite for the Longformer tokenizers (slow + fast).

    NOTE(review): heavy anonymization damage — the four ``a__`` class
    attributes shadow each other (only the last survives), every test method
    shares the name ``_SCREAMING_SNAKE_CASE`` (only the last definition
    survives on the class), and ``setUp`` binds its fixtures to the
    throwaway local ``UpperCAmelCase_`` while later code reads
    ``self.vocab_file`` / ``self.merges_file``.  Reconcile with upstream.
    """
    a__ : Any = LongformerTokenizer
    a__ : Tuple = True
    a__ : Optional[int] = LongformerTokenizerFast
    a__ : str = True

    # Writes a tiny BPE vocab + merges file pair for the tests to load.
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        UpperCAmelCase_= [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        UpperCAmelCase_= dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
        UpperCAmelCase_= ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        UpperCAmelCase_= {"""unk_token""": """<unk>"""}
        UpperCAmelCase_= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        UpperCAmelCase_= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(__UpperCAmelCase ) )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__UpperCAmelCase : int ) -> Union[str, Any]:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ) -> List[str]:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def _SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : Any ) -> int:
        UpperCAmelCase_= """lower newer"""
        UpperCAmelCase_= """lower newer"""
        return input_text, output_text

    # Basic tokenize + convert_tokens_to_ids round-trip on the tiny vocab.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        UpperCAmelCase_= self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCAmelCase_= """lower newer"""
        UpperCAmelCase_= ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        UpperCAmelCase_= tokenizer.tokenize(__UpperCAmelCase )  # , add_prefix_space=True)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_= tokens + [tokenizer.unk_token]
        UpperCAmelCase_= [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        UpperCAmelCase_= self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__UpperCAmelCase ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__UpperCAmelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )

    # Checks that build_inputs_with_special_tokens matches plain encode().
    @slow
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        UpperCAmelCase_= self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
        UpperCAmelCase_= tokenizer.encode("""sequence builders""" , add_special_tokens=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.encode("""multi-sequence build""" , add_special_tokens=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.encode(
            """sequence builders""" , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    # Space-handling around special tokens (notably the left-stripping <mask>).
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        UpperCAmelCase_= self.get_tokenizer()
        UpperCAmelCase_= """Encode this sequence."""
        UpperCAmelCase_= tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
        # Testing encoder arguments
        UpperCAmelCase_= tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_= tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
        UpperCAmelCase_= tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )
        # Testing spaces after special tokens
        UpperCAmelCase_= """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )} )  # mask token has a left space
        UpperCAmelCase_= tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
        UpperCAmelCase_= """Encode <mask> sequence"""
        UpperCAmelCase_= """Encode <mask>sequence"""
        UpperCAmelCase_= tokenizer.encode(__UpperCAmelCase )
        UpperCAmelCase_= encoded.index(__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        UpperCAmelCase_= tokenizer.encode(__UpperCAmelCase )
        UpperCAmelCase_= encoded.index(__UpperCAmelCase )
        UpperCAmelCase_= tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__UpperCAmelCase , __UpperCAmelCase )

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        pass

    # Slow/fast parity: token_type_ids, attention_mask, and <mask> offsets.
    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                UpperCAmelCase_= self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
                UpperCAmelCase_= """A, <mask> AllenNLP sentence."""
                UpperCAmelCase_= tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                UpperCAmelCase_= tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                UpperCAmelCase_= tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    __UpperCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    __UpperCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )

    # The pre-tokenizer/post-processor state must round-trip the ctor args.
    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
            UpperCAmelCase_= json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            UpperCAmelCase_= json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __UpperCAmelCase )
            self.assertEqual(post_processor_state["""add_prefix_space"""] , __UpperCAmelCase )
            self.assertEqual(post_processor_state["""trim_offsets"""] , __UpperCAmelCase )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_= """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                UpperCAmelCase_= F"""{text_of_1_token} {text_of_1_token}"""
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ) + 1, len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__UpperCAmelCase ), len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
                UpperCAmelCase_= F""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ) + 1, 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
                UpperCAmelCase_= self.rust_tokenizer_class.from_pretrained(
                    __UpperCAmelCase , use_fast=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase )
                UpperCAmelCase_= tokenizer_r(__UpperCAmelCase , return_offsets_mapping=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__UpperCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__UpperCAmelCase ), 1 + len(__UpperCAmelCase ) + 1 + len(__UpperCAmelCase )) , )
| 364 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__A = datasets.utils.logging.get_logger(__name__)
__A = ['''names''', '''prefix''']
__A = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__A = ['''encoding_errors''', '''on_bad_lines''']
__A = ['''date_format''']
@dataclass
class lowercase ( datasets.BuilderConfig):
    """BuilderConfig for CSV files.

    The fields mirror the keyword arguments of ``pandas.read_csv``; the
    property below assembles them into a kwargs dict for the reader.

    NOTE(review): every field is declared under the same name ``a__`` — in a
    dataclass each re-declaration overwrites the previous one, so attributes
    read later (``self.delimiter``, ``self.sep``, ``self.header``, ...) are
    never actually defined here. This looks like mechanically mangled code;
    the original field names need restoring — TODO confirm against upstream.
    """

    a__ : str = ","                     # presumably ``sep`` — TODO confirm
    a__ : Optional[str] = None          # presumably ``delimiter``
    a__ : Optional[Union[int, List[int], str]] = "infer"
    a__ : Optional[List[str]] = None
    a__ : Optional[List[str]] = None
    a__ : Optional[Union[int, str, List[int], List[str]]] = None
    a__ : Optional[Union[List[int], List[str]]] = None
    a__ : Optional[str] = None
    a__ : bool = True
    a__ : Optional[Literal["c", "python", "pyarrow"]] = None
    a__ : Dict[Union[int, str], Callable[[Any], Any]] = None
    a__ : Optional[list] = None
    a__ : Optional[list] = None
    a__ : bool = False
    a__ : Optional[Union[int, List[int]]] = None
    a__ : Optional[int] = None
    a__ : Optional[Union[str, List[str]]] = None
    a__ : bool = True
    a__ : bool = True
    a__ : bool = False
    a__ : bool = True
    a__ : Optional[str] = None
    a__ : str = "."
    a__ : Optional[str] = None
    a__ : str = '"'
    a__ : int = 0
    a__ : Optional[str] = None
    a__ : Optional[str] = None
    a__ : Optional[str] = None
    a__ : Optional[str] = None
    a__ : bool = True
    a__ : bool = True
    a__ : int = 0
    a__ : bool = True
    a__ : bool = False
    a__ : Optional[str] = None
    a__ : int = 1_0000                  # pandas chunksize for iterative reading
    a__ : Optional[datasets.Features] = None
    a__ : Optional[str] = "strict"
    a__ : Literal["error", "warn", "skip"] = "error"
    a__ : Optional[str] = None

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
        """Post-init hook: propagate the legacy ``delimiter``/``column_names``
        aliases onto the canonical pandas names.

        NOTE(review): both assignments bind a throwaway local
        ``UpperCAmelCase_`` — presumably the original wrote
        ``self.sep = self.delimiter`` and ``self.names = self.column_names``;
        as written this method has no effect. TODO confirm and restore.
        """
        if self.delimiter is not None:
            UpperCAmelCase_= self.delimiter
        if self.column_names is not None:
            UpperCAmelCase_= self.column_names

    @property
    def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
        """Build the kwargs dict forwarded to ``pandas.read_csv``.

        NOTE(review): the dict is assigned to a throwaway local
        ``UpperCAmelCase_`` but every later line refers to it as
        ``pd_read_csv_kwargs`` — this raises NameError at runtime as written.
        """
        UpperCAmelCase_= {
            """sep""": self.sep,
            """header""": self.header,
            """names""": self.names,
            """index_col""": self.index_col,
            """usecols""": self.usecols,
            """prefix""": self.prefix,
            """mangle_dupe_cols""": self.mangle_dupe_cols,
            """engine""": self.engine,
            """converters""": self.converters,
            """true_values""": self.true_values,
            """false_values""": self.false_values,
            """skipinitialspace""": self.skipinitialspace,
            """skiprows""": self.skiprows,
            """nrows""": self.nrows,
            """na_values""": self.na_values,
            """keep_default_na""": self.keep_default_na,
            """na_filter""": self.na_filter,
            """verbose""": self.verbose,
            """skip_blank_lines""": self.skip_blank_lines,
            """thousands""": self.thousands,
            """decimal""": self.decimal,
            """lineterminator""": self.lineterminator,
            """quotechar""": self.quotechar,
            """quoting""": self.quoting,
            """escapechar""": self.escapechar,
            """comment""": self.comment,
            """encoding""": self.encoding,
            """dialect""": self.dialect,
            """error_bad_lines""": self.error_bad_lines,
            """warn_bad_lines""": self.warn_bad_lines,
            """skipfooter""": self.skipfooter,
            """doublequote""": self.doublequote,
            """memory_map""": self.memory_map,
            """float_precision""": self.float_precision,
            """chunksize""": self.chunksize,
            """encoding_errors""": self.encoding_errors,
            """on_bad_lines""": self.on_bad_lines,
            """date_format""": self.date_format,
        }
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __UpperCAmelCase ):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class lowercase ( datasets.ArrowBasedBuilder):
    """Arrow-based dataset builder that reads CSV files via pandas.

    NOTE(review): all four methods share the mangled name
    ``_SCREAMING_SNAKE_CASE`` (each definition overwrites the previous one),
    and locals are assigned to a throwaway ``UpperCAmelCase_`` but read back
    under their presumed original names (``data_files``, ``files``,
    ``splits``, ``schema``, ...). The originals were presumably ``_info``,
    ``_split_generators``, ``_cast_table`` and ``_generate_tables`` — TODO
    confirm against upstream and restore.
    """

    a__ : int = CsvConfig  # builder config class (annotation looks mangled)

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        # Expose the configured features as the dataset info.
        return datasets.DatasetInfo(features=self.config.features )

    def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Dict ) -> Optional[int]:
        # Build split generators from ``config.data_files``.
        # NOTE(review): ``dl_manager`` is referenced but the parameter was
        # renamed to ``__UpperCAmelCase`` — NameError at runtime as written.
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        UpperCAmelCase_= dl_manager.download_and_extract(self.config.data_files )
        if isinstance(__UpperCAmelCase , (str, list, tuple) ):
            # A bare path or flat list of paths maps to a single TRAIN split.
            UpperCAmelCase_= data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                UpperCAmelCase_= [files]
            UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        # Otherwise ``data_files`` is a mapping split_name -> paths.
        UpperCAmelCase_= []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                UpperCAmelCase_= [files]
            UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"""files""": files} ) )
        return splits

    def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : pa.Table ) -> pa.Table:
        # Cast an Arrow table to the configured feature schema, if any.
        if self.config.features is not None:
            UpperCAmelCase_= self.config.features.arrow_schema
            if all(not require_storage_cast(__UpperCAmelCase ) for feature in self.config.features.values() ):
                # cheaper cast
                UpperCAmelCase_= pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__UpperCAmelCase )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                UpperCAmelCase_= table_cast(__UpperCAmelCase , __UpperCAmelCase )
        return pa_table

    def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[Any] ) -> List[str]:
        # Generate ((file_idx, batch_idx), table) pairs by streaming each CSV
        # through ``pd.read_csv`` in chunks.
        UpperCAmelCase_= self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        UpperCAmelCase_= (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(__UpperCAmelCase ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase ) ):
            UpperCAmelCase_= pd.read_csv(__UpperCAmelCase , iterator=__UpperCAmelCase , dtype=__UpperCAmelCase , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(__UpperCAmelCase ):
                    UpperCAmelCase_= pa.Table.from_pandas(__UpperCAmelCase )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(__UpperCAmelCase )
            except ValueError as e:
                # Surface which file failed, then re-raise for the caller.
                logger.error(F"""Failed to read file '{file}' with error {type(__UpperCAmelCase )}: {e}""" )
                raise
| 277 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( lowerCAmelCase__ ):
    """Test suite for ``UniPCMultistepScheduler``.

    NOTE(review): the two class attributes below share one mangled name, so
    the second assignment overwrites the first (originals were presumably
    ``scheduler_classes`` and ``forward_default_kwargs``). Throughout, locals
    are bound to a throwaway ``snake_case_`` but read back under their
    presumed original names (``config``, ``kwargs``, ``scheduler``, ...),
    which raises NameError at runtime as written — TODO restore the names.
    """

    SCREAMING_SNAKE_CASE_ = (UniPCMultistepScheduler,)
    SCREAMING_SNAKE_CASE_ = (("num_inference_steps", 2_5),)

    def a_ ( self, **lowerCAmelCase__) -> List[Any]:
        # Default scheduler config, with caller overrides merged in.
        snake_case_ = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }
        config.update(**lowerCAmelCase__)
        return config

    def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> Any:
        # Save a configured scheduler, reload it, and check both instances
        # step identically from the same state.
        snake_case_ = dict(self.forward_default_kwargs)
        snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__)
        snake_case_ = self.dummy_sample
        snake_case_ = 0.1 * sample
        snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
            snake_case_ = scheduler_class(**lowerCAmelCase__)
            scheduler.set_timesteps(lowerCAmelCase__)
            # copy over dummy past residuals
            snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCAmelCase__)
                snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__)
                new_scheduler.set_timesteps(lowerCAmelCase__)
                # copy over dummy past residuals
                snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
            snake_case_ , snake_case_ = sample, sample
            for t in range(lowerCAmelCase__, time_step + scheduler.config.solver_order + 1):
                snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
                snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def a_ ( self, lowerCAmelCase__=0, **lowerCAmelCase__) -> Dict:
        # Same save/reload round-trip check, but with the default config.
        snake_case_ = dict(self.forward_default_kwargs)
        snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__)
        snake_case_ = self.dummy_sample
        snake_case_ = 0.1 * sample
        snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            snake_case_ = self.get_scheduler_config()
            snake_case_ = scheduler_class(**lowerCAmelCase__)
            scheduler.set_timesteps(lowerCAmelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowerCAmelCase__)
                snake_case_ = scheduler_class.from_pretrained(lowerCAmelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowerCAmelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case_ = dummy_past_residuals[: new_scheduler.config.solver_order]
            snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            snake_case_ = new_scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def a_ ( self, lowerCAmelCase__=None, **lowerCAmelCase__) -> List[Any]:
        # Run a full 10-step denoising loop and return the final sample.
        if scheduler is None:
            snake_case_ = self.scheduler_classes[0]
            snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
            snake_case_ = scheduler_class(**lowerCAmelCase__)
        snake_case_ = self.scheduler_classes[0]
        snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
        snake_case_ = scheduler_class(**lowerCAmelCase__)
        snake_case_ = 10
        snake_case_ = self.dummy_model()
        snake_case_ = self.dummy_sample_deter
        scheduler.set_timesteps(lowerCAmelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__)
            snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
        return sample

    def a_ ( self) -> List[str]:
        # Two consecutive steps should preserve the sample's shape.
        snake_case_ = dict(self.forward_default_kwargs)
        snake_case_ = kwargs.pop('num_inference_steps', lowerCAmelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case_ = self.get_scheduler_config()
            snake_case_ = scheduler_class(**lowerCAmelCase__)
            snake_case_ = self.dummy_sample
            snake_case_ = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowerCAmelCase__, 'set_timesteps'):
                scheduler.set_timesteps(lowerCAmelCase__)
            elif num_inference_steps is not None and not hasattr(lowerCAmelCase__, 'set_timesteps'):
                snake_case_ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case_ = [residual + 0.2, residual + 0.15, residual + 0.10]
            snake_case_ = dummy_past_residuals[: scheduler.config.solver_order]
            snake_case_ = scheduler.timesteps[5]
            snake_case_ = scheduler.timesteps[6]
            snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)

    def a_ ( self) -> Tuple:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        snake_case_ = UniPCMultistepScheduler(**self.get_scheduler_config())
        snake_case_ = self.full_loop(scheduler=lowerCAmelCase__)
        snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2464) < 1e-3
        # Round-trip the config through the other multistep schedulers and
        # check the UniPC result is unchanged.
        snake_case_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        snake_case_ = DEISMultistepScheduler.from_config(scheduler.config)
        snake_case_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
        snake_case_ = UniPCMultistepScheduler.from_config(scheduler.config)
        snake_case_ = self.full_loop(scheduler=lowerCAmelCase__)
        snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def a_ ( self) -> str:
        # Sweep over several training-timestep counts.
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=lowerCAmelCase__)

    def a_ ( self) -> List[Any]:
        # Sweep thresholding-related config combinations.
        self.check_over_configs(thresholding=lowerCAmelCase__)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=lowerCAmelCase__, prediction_type=lowerCAmelCase__, sample_max_value=lowerCAmelCase__, solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, )

    def a_ ( self) -> List[str]:
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowerCAmelCase__)

    def a_ ( self) -> Tuple:
        # Sweep solver type/order/prediction type and check no NaNs appear.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, )
                    snake_case_ = self.full_loop(
                        solver_order=lowerCAmelCase__, solver_type=lowerCAmelCase__, prediction_type=lowerCAmelCase__, )
                    assert not torch.isnan(lowerCAmelCase__).any(), "Samples have nan numbers"

    def a_ ( self) -> Any:
        # Check the lower_order_final flag in both states.
        self.check_over_configs(lower_order_final=lowerCAmelCase__)
        self.check_over_configs(lower_order_final=lowerCAmelCase__)

    def a_ ( self) -> int:
        # Sweep inference step counts through the forward check.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=lowerCAmelCase__, time_step=0)

    def a_ ( self) -> Tuple:
        # Full loop with defaults should hit the known reference mean.
        snake_case_ = self.full_loop()
        snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.2464) < 1e-3

    def a_ ( self) -> str:
        # Full loop with v-prediction should hit its reference mean.
        snake_case_ = self.full_loop(prediction_type='v_prediction')
        snake_case_ = torch.mean(torch.abs(lowerCAmelCase__))
        assert abs(result_mean.item() - 0.1014) < 1e-3

    def a_ ( self) -> Dict:
        # fp16 loop: the output sample should stay in half precision.
        # NOTE(review): ``torch.floataa`` below looks like a mangled
        # ``torch.float16`` — TODO confirm.
        snake_case_ = self.scheduler_classes[0]
        snake_case_ = self.get_scheduler_config(thresholding=lowerCAmelCase__, dynamic_thresholding_ratio=0)
        snake_case_ = scheduler_class(**lowerCAmelCase__)
        snake_case_ = 10
        snake_case_ = self.dummy_model()
        snake_case_ = self.dummy_sample_deter.half()
        scheduler.set_timesteps(lowerCAmelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__)
            snake_case_ = scheduler.step(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__).prev_sample
        assert sample.dtype == torch.floataa

    def a_ ( self, **lowerCAmelCase__) -> str:
        # Requesting every training timestep should yield unique timesteps.
        for scheduler_class in self.scheduler_classes:
            snake_case_ = self.get_scheduler_config(**lowerCAmelCase__)
            snake_case_ = scheduler_class(**lowerCAmelCase__)
            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 69 | """simple docstring"""
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase(image_embeds: torch.Tensor, text_embeds: torch.Tensor) -> torch.Tensor:
    """Return the pairwise cosine-similarity matrix between two embedding batches.

    Both inputs are L2-normalized along their last dimension, then multiplied,
    so entry ``(i, j)`` is the cosine similarity of ``image_embeds[i]`` and
    ``text_embeds[j]``.

    Args:
        image_embeds: 2-D tensor of shape ``(num_images, dim)``.
        text_embeds: 2-D tensor of shape ``(num_concepts, dim)``.

    Returns:
        Tensor of shape ``(num_images, num_concepts)``.

    Fixes over the original: the signature declared two parameters with the
    same name (a SyntaxError in Python) and an incorrect ``-> int`` return
    annotation; internal names were also mangled so the second ``normalize``
    result shadowed the first.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


# Backward-compatible alias: the safety-checker class in this module calls
# this helper under the name ``cosine_distance``.
cosine_distance = UpperCAmelCase
class UpperCamelCase ( lowerCAmelCase__ ):
    """CLIP-based safety checker: projects images into CLIP space and scores
    them against fixed "special care" (17) and "concept" (3) embedding banks.

    NOTE(review): both ``a_`` methods below declare two parameters with the
    same mangled name ``lowerCAmelCase__`` — that is a SyntaxError in Python;
    the originals were presumably ``clip_input`` and ``images``. Locals are
    likewise bound to a throwaway ``snake_case_`` and read back under their
    presumed original names, and ``config`` is referenced in ``__init__``
    although the parameter was renamed. TODO restore from upstream.
    """

    SCREAMING_SNAKE_CASE_ = CLIPConfig              # overwritten by the next line (mangled names)
    SCREAMING_SNAKE_CASE_ = ["CLIPEncoderLayer"]

    def __init__( self, lowerCAmelCase__) -> Optional[int]:
        # Build the CLIP vision tower, a projection head, and the fixed
        # concept-embedding banks (frozen: requires_grad is disabled).
        super().__init__(lowerCAmelCase__)
        snake_case_ = CLIPVisionModel(config.vision_config)
        snake_case_ = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=lowerCAmelCase__)
        snake_case_ = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=lowerCAmelCase__)
        snake_case_ = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=lowerCAmelCase__)
        snake_case_ = nn.Parameter(torch.ones(17), requires_grad=lowerCAmelCase__)
        snake_case_ = nn.Parameter(torch.ones(3), requires_grad=lowerCAmelCase__)

    @torch.no_grad()
    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
        # Per-image Python-loop scoring path: returns (images, has_nsfw_concepts).
        snake_case_ = self.vision_model(lowerCAmelCase__)[1] # pooled_output
        snake_case_ = self.visual_projection(lowerCAmelCase__)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        snake_case_ = cosine_distance(lowerCAmelCase__, self.special_care_embeds).cpu().float().numpy()
        snake_case_ = cosine_distance(lowerCAmelCase__, self.concept_embeds).cpu().float().numpy()
        snake_case_ = []
        snake_case_ = image_embeds.shape[0]
        for i in range(lowerCAmelCase__):
            snake_case_ = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            snake_case_ = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                snake_case_ = special_cos_dist[i][concept_idx]
                snake_case_ = self.special_care_embeds_weights[concept_idx].item()
                snake_case_ = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    # A "special care" hit raises the adjustment for the
                    # concept pass below.
                    result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]})
                    snake_case_ = 0.01
            for concept_idx in range(len(cos_dist[0])):
                snake_case_ = cos_dist[i][concept_idx]
                snake_case_ = self.concept_embeds_weights[concept_idx].item()
                snake_case_ = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(lowerCAmelCase__)
            result.append(lowerCAmelCase__)
        snake_case_ = [len(res['bad_concepts']) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
        # Vectorized (ONNX-friendly) scoring path: same contract as above but
        # computed with batched tensor ops instead of Python loops.
        snake_case_ = self.vision_model(lowerCAmelCase__)[1] # pooled_output
        snake_case_ = self.visual_projection(lowerCAmelCase__)
        snake_case_ = cosine_distance(lowerCAmelCase__, self.special_care_embeds)
        snake_case_ = cosine_distance(lowerCAmelCase__, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        snake_case_ = 0.0
        snake_case_ = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        snake_case_ = torch.any(special_scores > 0, dim=1)
        snake_case_ = special_care * 0.01
        snake_case_ = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        snake_case_ = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        snake_case_ = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 69 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
    """Tokenizer tests for CpmAntTokenizer.

    NOTE(review): the two class attributes share one mangled name
    ``__lowercase`` (the second overwrites the first; originals were
    presumably ``tokenizer_class`` and ``test_rust_tokenizer``), and both
    test methods share the mangled name ``snake_case_``. Locals are bound to
    a throwaway ``__SCREAMING_SNAKE_CASE`` but read back as ``vocab_tokens``
    / ``self.vocab_file`` etc., which fails at runtime as written.
    """

    __lowercase : List[str] = CpmAntTokenizer
    __lowercase : List[str] = False

    def snake_case_ ( self):
        # Write a small fixture vocabulary file into the test tmp dir.
        super().setUp()
        __SCREAMING_SNAKE_CASE = [
            """<d>""",
            """</d>""",
            """<s>""",
            """</s>""",
            """</_>""",
            """<unk>""",
            """<pad>""",
            """</n>""",
            """我""",
            """是""",
            """C""",
            """P""",
            """M""",
            """A""",
            """n""",
            """t""",
        ]
        __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

    @tooslow
    def snake_case_ ( self):
        # End-to-end check against the released cpm-ant-10b vocabulary:
        # tokenize, convert to ids (with BOS), and decode round-trip.
        __SCREAMING_SNAKE_CASE = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""")
        __SCREAMING_SNAKE_CASE = """今天天气真好!"""
        __SCREAMING_SNAKE_CASE = ["""今天""", """天气""", """真""", """好""", """!"""]
        __SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = """今天天气真好!"""
        __SCREAMING_SNAKE_CASE = [tokenizer.bos_token] + tokens
        __SCREAMING_SNAKE_CASE = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , lowerCAmelCase__)
        __SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCAmelCase__)
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
| 255 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """Slow integration test for the TF MT5 model.

    NOTE(review): locals are bound to a throwaway ``__SCREAMING_SNAKE_CASE``
    but read back as ``model``/``tokenizer``/``mtf_score``/``EXPECTED_SCORE``
    — NameError at runtime as written; the original names need restoring.
    """

    @slow
    def snake_case_ ( self):
        # Compute the per-token LM loss of google/mt5-small on a tiny pair
        # and compare the (negative mean) score to a known reference value.
        __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""")
        __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""google/mt5-small""")
        __SCREAMING_SNAKE_CASE = tokenizer("""Hello there""" , return_tensors="""tf""").input_ids
        __SCREAMING_SNAKE_CASE = tokenizer("""Hi I am""" , return_tensors="""tf""").input_ids
        __SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ , labels=lowerCAmelCase__).loss
        __SCREAMING_SNAKE_CASE = -tf.math.reduce_mean(lowerCAmelCase__).numpy()
        __SCREAMING_SNAKE_CASE = -21.22_81_68
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2E-4)
| 255 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
snake_case = DebertaVaTokenizer
snake_case = DebertaVaTokenizerFast
snake_case = True
snake_case = True
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_A = DebertaVaTokenizer(__UpperCAmelCase , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = "this is a test"
_A = "this is a test"
return input_text, output_text
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "<pad>"
_A = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(__UpperCAmelCase ) , 30001 )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = " \tHeLLo!how \n Are yoU? "
_A = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
_A = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "I was born in 92000, and this is falsé."
_A = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
_A = DebertaVaTokenizer(__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "I was born in 92000, and this is falsé."
_A = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
_A = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "I was born in 92000, and this is falsé."
_A = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
_A = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = "I was born in 92000, and this is falsé."
_A = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
_A = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = " \tHeLLo!how \n Are yoU? "
_A = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
_A = DebertaVaTokenizer(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , do_lower_case=__UpperCAmelCase , split_by_punct=__UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = "I was born in 92000, and this is falsé."
_A = tokenizer.convert_ids_to_tokens(tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
_A = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_A = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = self.get_rust_tokenizer()
_A = tokenizer.encode(__UpperCAmelCase )
_A = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_A = "This is a test"
_A = [13, 1, 4398, 25, 21, 1289]
_A = ["▁", "T", "his", "▁is", "▁a", "▁test"]
_A = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
_A = DebertaVaTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
_A = DebertaVaTokenizerFast(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
_A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
# fmt: off
_A = "I was born in 92000, and this is falsé."
_A = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_A = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
_A = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
_A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = rust_tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Any ):
    """Check that build_inputs_with_special_tokens wraps single and paired
    sequences with [CLS]/[SEP] as expected.

    Fix: every local was collapsed to `_A`, so the later references to
    `text` / `text_a` were NameErrors and the special-token calls received
    the wrong values; locals are now named distinctly.
    """
    # NOTE(review): the constructor argument name looks garbled (probably a
    # sample-vocab path defined earlier in the file) — confirm.
    tokenizer = DebertaVaTokenizer(__UpperCAmelCase)
    text = tokenizer.encode("sequence builders")
    text_a = tokenizer.encode("multi-sequence build")
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
    self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
    self.assertEqual(
        [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], encoded_pair, )
@slow
def lowerCAmelCase ( self : int ):
    """Integration check against pinned reference encodings for
    microsoft/deberta-v2-xlarge.

    Fix: the expected-encoding dict was bound to `_A` but the helper was
    called with the undefined name `__UpperCAmelCase`; it now receives the
    dict that is actually built here.
    """
    # fmt: off
    expected_encoding = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
    # fmt: on
    self.tokenizer_integration_test_util(
        expected_encoding=expected_encoding, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
| 79 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public symbols, consumed by `_LazyModule` below.
# Fix: this dict was bound to a garbled name while the `_LazyModule(...)`
# call referenced `_import_structure`, raising NameError at import time.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is present: expose the modeling symbols as well. The corrupted
    # version rebound the whole variable here, dropping the config entries.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Install a lazy proxy so the heavy submodule imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
    """Holds the knobs used to build a `LayoutLMvaImageProcessor` under test.

    Fix: the corrupted `__init__` declared nine parameters all named
    `_UpperCamelCase` (a SyntaxError); the parameter names are restored from
    the attribute assignments in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Default resize target is 18x18 when no explicit size is given.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


# The test class below instantiates this helper as `LayoutLMvaImageProcessingTester`,
# and a second class named `__UpperCamelCase` later in the file shadows this one,
# so keep an explicit binding under the expected name.
LayoutLMvaImageProcessingTester = __UpperCamelCase
@require_torch
@require_pytesseract
class __UpperCamelCase ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    """Tests for `LayoutLMvaImageProcessor`.

    Fixes: the base class was the undefined name `lowercase__` (restored to the
    imported `ImageProcessingSavingTestMixin`); every method was named `a__`, so
    later definitions silently replaced earlier ones and unittest discovered
    nothing — conventional `test_*` / `setUp` / property names are restored from
    the internal references (`self.image_processor_dict`,
    `self.image_processing_class`, `self.image_processor_tester`).
    """

    # Referenced throughout as `self.image_processing_class`.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        # Referenced by the property and the test methods below.
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        # An int `size` kwarg is expanded to a square target.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # Intentionally skipped for this processor.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images (the garbled flags are restored to the
        # upstream convention: equal_resolution=False)
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_2_4, 2_2_4))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_2_4, 2_2_4))
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __UpperCamelCase ( BaseOutput ):
    """Output of the Semantic Stable Diffusion pipeline.

    Fixes: the base class was the undefined name `lowercase__` (restored to the
    `BaseOutput` imported above), and both fields shared the garbled name
    `lowercase`, so the second annotation silently replaced the first; they are
    restored to the conventional BaseOutput field names.
    """

    # Denoised images produced by the pipeline (PIL list or numpy array).
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image safety-checker flags; None when the checker is disabled.
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
# Lightweight stand-in for the argparse namespace consumed by `TestCommand`.
# `defaults` covers every field except the leading `dataset`, which stays required.
# Fix: this was bound only to a garbled name while the test below constructs
# `_TestCommandArgs(...)`, which was a NameError.
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)

# Preserve the previous binding in case anything referenced the old name.
_SCREAMING_SNAKE_CASE = _TestCommandArgs
def UpperCamelCase_( source , target ):
    """Return True when `source` is within 1% (relative) of `target`.

    Fix: both parameters were declared with the same garbled name (a
    SyntaxError); the names are restored from the body's own references.
    """
    return (abs(source - target) / target) < 0.01


# The assertions later in this file call the helper under this name.
is_apercent_close = UpperCamelCase_
@pytest.mark.integration
def UpperCamelCase_( snake_case ):
    """Run `TestCommand` with --save_infos against the dataset at `snake_case`
    (a dataset directory path) and verify the generated README dataset infos.

    Fixes: locals were all collapsed to `snake_case_` so `test_command`,
    `result` and `expected` were undefined; the tuple unpack at the getattr
    line was lost; the final comparison was a bare no-op expression and now
    actually asserts.
    """
    args = _TestCommandArgs(dataset=snake_case, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(snake_case, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(snake_case)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2_3_5_1_5_6_3,
                        "num_examples": 1_0_0_0_0,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 2_3_8_4_1_8,
                        "num_examples": 1_0_0_0,
                    },
                ],
                download_size=3_9_4_0_6_8_0,
                dataset_size=2_5_8_9_9_8_1,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            # Byte counts may drift slightly across versions; 1% tolerance.
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # Was a bare `result == expected` expression (no-op): assert it.
            assert result == expected
| 85 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module logger. Fix: this binding and the example docstring below shared the
# same garbled name, so the logger was immediately clobbered by the string.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Example snippet injected into the pipeline's __call__ docstring via
# `replace_example_docstring`; keep the existing name — the class below uses it.
__UpperCAmelCase = '\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n    >>> from diffusers.utils import load_image\n    >>> import torch\n\n    >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n    ... )\n    >>> pipe_prior.to("cuda")\n\n    >>> prompt = "A red cartoon frog, 4k"\n    >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n    >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n    ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n    ... )\n    >>> pipe.to("cuda")\n\n    >>> init_image = load_image(\n    ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n    ...     "/kandinsky/frog.png"\n    ... )\n\n    >>> image = pipe(\n    ...     image=init_image,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=zero_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ...     strength=0.2,\n    ... ).images\n\n    >>> image[0].save("red_frog.png")\n    ```\n'
def lowercase__ ( height , width , scale_factor=8 ):
    """Map a pixel-space (height, width) to the matching MoVQ latent size,
    rounding each dimension up to the next multiple of `scale_factor`**2.

    Fixes: all three parameters shared one garbled name (a SyntaxError) and
    the intermediate results were assigned to throwaway names while the body
    read `new_height`/`new_width`.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# The pipeline below calls the helper under this name.
downscale_height_and_width = lowercase__
def lowercase__ ( pil_image , w=512 , h=512 ):
    """Resize a PIL image to (w, h) and return it as a float tensor of shape
    (1, 3, h, w) scaled to [-1, 1].

    Fixes: all three parameters shared one garbled name (a SyntaxError) and the
    body read `pil_image`/`arr` which were never assigned; `np.floataa` is the
    garbled spelling of `np.float32`.
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


# The pipeline below calls the helper under this name.
prepare_image = lowercase__
class lowerCamelCase (DiffusionPipeline ):
    """Image-to-image pipeline for Kandinsky 2.2 (unet + scheduler + MoVQ).

    Reconstruction notes: the base class was the undefined name `_snake_case`
    (restored to the imported `DiffusionPipeline`); every method signature
    declared duplicate `_UpperCamelCase` parameters (SyntaxErrors) — parameter
    and method names are restored from the internal call sites
    (`self.get_timesteps`, `self.prepare_latents`, `self.movq_scale_factor`)
    and the upstream diffusers pipeline of the same shape.
    """

    def __init__( self , unet , scheduler , movq , ) -> Union[str, Any]:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # Spatial factor between pixel space and MoVQ latent space.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps( self , num_inference_steps , strength , device ) -> Dict:
        """Clip the schedule so denoising starts `strength` of the way in."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ) -> Tuple:
        """Encode `image` into MoVQ latents (unless already 4-channel latents)
        and add noise appropriate for `timestep`."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" )
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Input is already in latent space.
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        return init_latents

    def enable_sequential_cpu_offload( self , gpu_id=0 ) -> Any:
        """Offload submodels to CPU with accelerate, loading each to GPU only
        when its forward runs (max memory savings, slower)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload( self , gpu_id=0 ) -> Union[str, Any]:
        """Offload whole submodels to CPU between uses (cheaper than
        sequential offload, better throughput)."""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ) -> Dict:
        """Device the unet actually executes on (accounts for offload hooks)."""
        if not hasattr(self.unet, '_hf_hook'):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(__UpperCAmelCase)
    def __call__( self , image_embeds , image , negative_image_embeds , height = 5_1_2 , width = 5_1_2 , num_inference_steps = 1_0_0 , guidance_scale = 4.0 , strength = 0.3 , num_images_per_prompt = 1 , generator = None , output_type = "pil" , return_dict = True , ) -> str:
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)['latents']
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 29 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :int ):
    """Return the prime factorization of ``snake_case_`` as a list with multiplicity.

    Example: 12 -> [2, 2, 3].  For inputs < 2 the result is an empty list.
    """
    # Fix: the original bound every local to one throwaway name, leaving
    # `i`, `n` and `factors` undefined (NameError on first call).
    n = snake_case_
    i = 2
    factors = []
    # Trial division only needs to run while i * i <= n; any remaining
    # n > 1 afterwards is itself prime.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _UpperCAmelCase :
    """Builds a tiny MaskFormerSwin config plus dummy inputs for the tests below.

    NOTE(review): the test classes in this module refer to this helper as
    ``MaskFormerSwinModelTester`` — confirm the intended public class name.
    Fixes applied: the original ``__init__`` declared every parameter with the
    same name (a SyntaxError) and never stored anything on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        # Store every knob on the instance so the individual tests can read them.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the tiny ``MaskFormerSwinConfig`` shared by every test."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): `path_norm` looks like a typo for `patch_norm` — confirm upstream
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward a ``MaskFormerSwinModel`` and check the last hidden state shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # After each of the len(depths)-1 downsampling stages the sequence length
        # shrinks by 4x while the embedding dimension doubles.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward a ``MaskFormerSwinBackbone`` and verify feature maps, channels and errors."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised for an unknown stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the shared ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-model test harness for MaskFormerSwin.

    NOTE(review): the base classes, the boolean flag names and the test-method
    names below were obfuscated in the source (`_lowerCAmelCase`, `a__`, `a`);
    they are reconstructed from the module's imports and from the upstream
    MaskFormerSwin test file — confirm against upstream.
    """

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common config properties are already covered by ConfigTester.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    # NOTE(review): original method name was obfuscated; reconstructed — confirm.
    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Forward once and check the number and shape of the hidden states."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Use a patch size that does not divide the image size, forcing padding.
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        """Check tuple outputs (return_dict=False) match dict outputs (return_dict=True)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaNs compare unequal to themselves, so zero them before allclose.
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class _UpperCAmelCase ( unittest.TestCase , BackboneTesterMixin ):
    """Backbone-specific tests for MaskFormerSwinBackbone.

    NOTE(review): the mixin base (`_lowerCAmelCase`) is reconstructed as the
    imported ``BackboneTesterMixin`` — confirm against upstream.
    """

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 86 | 1 |
import argparse
import os
import re

import packaging.version


# Root of the examples tree whose files carry a `check_min_version(...)` pin.
# Fix: the four module constants below were all assigned to the same throwaway
# name, while the functions in this module read PATH_TO_EXAMPLES,
# REPLACE_PATTERNS, REPLACE_FILES and README_FILE.
PATH_TO_EXAMPLES = """examples/"""
# pattern name -> (regex locating the version string, replacement template with a VERSION placeholder)
REPLACE_PATTERNS = {
    """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
    """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
    """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
    """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
# pattern name -> file whose hard-coded version must stay in sync
REPLACE_FILES = {
    """init""": """src/transformers/__init__.py""",
    """setup""": """setup.py""",
}
README_FILE = """README.md"""
def _snake_case( fname , version , pattern ):
    """Rewrite the version string inside *fname* using the regex registered for *pattern*.

    Fixes: the original declared three identically named parameters (a
    SyntaxError) and collapsed the (regex, template) pair into one local,
    losing the compiled pattern.
    """
    with open(fname, """r""", encoding="""utf-8""", newline="""\n""") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the placeholder, then apply the regex to the file contents.
    replace = replace.replace("""VERSION""", version)
    code = re_pattern.sub(replace, code)
    with open(fname, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.write(code)
def _snake_case( version ):
    """Propagate *version* to every ``check_min_version`` pin under the examples folder.

    Fix: the walk root and the joined path components had been replaced by the
    version argument; restore ``PATH_TO_EXAMPLES`` and ``(folder, fname)``.
    NOTE(review): ``update_version_in_file`` is the helper defined above
    (obfuscated to ``_snake_case``) — confirm the module-level names.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("""research_projects""")
        if "legacy" in directories:
            directories.remove("""legacy""")
        for fname in fnames:
            if fname.endswith(""".py"""):
                update_version_in_file(os.path.join(folder, fname), version, pattern="""examples""")
def _snake_case( version , patch=False ):
    """Update the version everywhere it is hard-coded; skip examples for patch releases.

    Fix: the original declared two parameters with the same name (a SyntaxError)
    and passed the same value for fname/version/pattern.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def _snake_case( ):
    """Redirect the model-list links in the main README from /main/ docs to stable docs.

    Fix: the prompt constants and the README path were bound to throwaway
    names, leaving ``_start_prompt``/``_end_prompt`` and the ``open`` target
    undefined.
    """
    _start_prompt = """🤗 Transformers currently provides the following architectures"""
    _end_prompt = """1. Want to contribute a new model?"""
    with open(README_FILE, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    # Update the lines in the model list.
    index = start_index
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("""1."""):
            lines[index] = lines[index].replace(
                """https://huggingface.co/docs/transformers/main/model_doc""",
                """https://huggingface.co/docs/transformers/model_doc""",
            )
        index += 1

    with open(README_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
def _snake_case( ):
    """Read the current package version out of the init file and return it parsed.

    Fix: the original searched an undefined name instead of the file contents
    and discarded the intermediate results into one throwaway local.
    """
    with open(REPLACE_FILES["""init"""], """r""") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["""init"""][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def _snake_case( patch=False ):
    """Interactively bump the version for a release (or patch release).

    Fix: the parameter was obfuscated while the body reads ``patch``, and the
    working version strings were discarded into one throwaway local.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("""Cleaning main README, don't forget to run `make fix-copies`.""")
        clean_main_ref_in_model_list()
def _snake_case( ):
    """Interactively bump to the next dev version after a release and clean the README.

    Fix: the body read ``current_version``/``dev_version``/``version`` that the
    obfuscation had collapsed into one throwaway local.
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("""Cleaning main README, don't forget to run `make fix-copies`.""")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Fix: the parser and parsed args were bound to a throwaway name, leaving
    # `parser` and `args` undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
    parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("""Nothing to do after a patch :-)""")
    else:
        post_release_work()
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _snake_case( idx ):
    """Return the (HF name, original name) renames for the patch-embedding layer of stage *idx*.

    Fix: the original body formatted ``{idx}`` while the parameter had been
    renamed away, making ``idx`` an unbound name.
    """
    embed = []
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    embed.append((f"{hf_prefix}.projection.weight", f"{orig_prefix}.proj.weight"))
    embed.append((f"{hf_prefix}.projection.bias", f"{orig_prefix}.proj.bias"))
    embed.append((f"{hf_prefix}.normalization.weight", f"{orig_prefix}.norm.weight"))
    embed.append((f"{hf_prefix}.normalization.bias", f"{orig_prefix}.norm.bias"))
    return embed
def _snake_case( idx , cnt ):
    """Return the (HF name, original name) weight renames for attention block *cnt* of stage *idx*.

    Fix: the original declared two identically named parameters (a SyntaxError)
    while the body read ``idx``/``cnt``.  The entry order matches the original
    hand-written list: conv-projection q/k/v (conv weight + all batch-norm
    statistics), the linear q/k/v projections, the attention output projection,
    the two MLP layers, and the two layer norms.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Convolutional q/k/v projections: one conv weight plus the full BN state.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        hf_conv = f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection"
        orig_conv = f"{orig_prefix}.attn.conv_proj_{short}"
        attention_weights.append((f"{hf_conv}.convolution.weight", f"{orig_conv}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{hf_conv}.normalization.{stat}", f"{orig_conv}.bn.{stat}"))

    # Linear q/k/v projections.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{param}",
                    f"{orig_prefix}.attn.proj_{short}.{param}",
                )
            )

    # Attention output projection.
    for param in ("weight", "bias"):
        attention_weights.append((f"{hf_prefix}.attention.output.dense.{param}", f"{orig_prefix}.attn.proj.{param}"))

    # MLP layers.
    for hf_layer, orig_layer in (("intermediate.dense", "mlp.fc1"), ("output.dense", "mlp.fc2")):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_layer}.{param}", f"{orig_prefix}.{orig_layer}.{param}"))

    # Layer norms.
    for hf_norm, orig_norm in (("layernorm_before", "norm1"), ("layernorm_after", "norm2")):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_norm}.{param}", f"{orig_prefix}.{orig_norm}.{param}"))

    return attention_weights
def _snake_case( idx ):
    """Return the cls-token rename for stage *idx*.

    The original checkpoint keeps its single class token under ``stage2``.
    Fix: the body referenced ``idx`` while the parameter had been renamed away.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token"""))
    return token
def _snake_case( ):
    """Return the renames for the final layer norm and the classification head.

    Fix: the list was bound to a throwaway name while the appends used ``head``.
    """
    head = []
    head.append(("""layernorm.weight""", """norm.weight"""))
    head.append(("""layernorm.bias""", """norm.bias"""))
    head.append(("""classifier.weight""", """head.weight"""))
    head.append(("""classifier.bias""", """head.bias"""))
    return head
def _snake_case( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """Convert an original CvT checkpoint to the HuggingFace format.

    Fetches the ImageNet-1k label mapping, builds a ``CvtConfig`` whose
    depth/head/dim layout is inferred from the model name, remaps every weight
    via the rename tables built by the helpers above, and saves model and image
    processor to *pytorch_dump_folder_path*.

    Fixes: the original declared four identically named parameters (a
    SyntaxError) and discarded config/model/weights into one throwaway local.
    NOTE(review): the config kwargs are restored to ``id2label``/``label2id``
    (the obfuscated source had ``idalabel``/``labelaid``) — confirm.
    """
    img_labels_file = """imagenet-1k-id2label.json"""
    num_labels = 1_000
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="""dataset""")), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""")
    # NOTE(review): presumably the processor resizes by shortest edge — confirm key name.
    image_processor.size["""shortest_edge"""] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("""cpu"""))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        # Only stages carrying a class token get the cls_token rename entry.
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : Optional[int] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 20 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
class a ( a__ ):
    """Deprecated alias of the Donut image processor, kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        """Emit a deprecation warning, then delegate construction to the parent class."""
        # `FutureWarning` is the conventional category for scheduled API removals.
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 350 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE (target: str , word_bank: list[str] | None = None ):
    """Return every ordered combination of words from *word_bank* whose
    concatenation equals *target* (classic "all construct" dynamic program).

    Each element of the result is a list of words; the empty target yields
    ``[[]]`` (one empty combination) and an unreachable target yields ``[]``.
    """
    word_bank = word_bank or []
    # create a table: table[i] holds every combination that builds target[:i]
    table_size = len(target ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value: the empty prefix has exactly one (empty) combination
    table[0] = [[]]
    # iterate through the indices
    for i in range(table_size ):
        # condition: position i is reachable
        if table[i] != []:
            for word in word_bank:
                # slice condition: the word must match target starting at i
                if target[i : i + len(word )] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]


# Public name used by the __main__ demo below.
all_construct = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Demo: print every way each target string can be assembled from its word bank.
    # NOTE(review): this calls `all_construct`, while the DP function above is
    # named `_SCREAMING_SNAKE_CASE` — confirm a module-level `all_construct`
    # binding exists before running.
    print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
    print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
    print(
        all_construct(
            '''hexagonosaurus''',
            ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
        )
    )
| 309 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def A ( name , val , spaces=0 ):
    """Recursively pretty-print a (possibly nested) checkpoint dict of tensors.

    `name` may be None for the anonymous root; `spaces` controls indentation.
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0 , spaces - 2 ) + "# {:" + str(50 - spaces ) + "s}"
        msg = fmt.format(name )
    # Print and recurse (if needed).
    if isinstance(val , dict ):
        if msg is not None:
            print(msg )
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2 )
    elif isinstance(val , torch.Tensor ):
        print(msg , ":" , val.size() )
    else:
        print(msg , ":" , val )


# Original name: used for the recursive call above and by main() below.
recursive_print = A
def A ( param , checkpoint_version , num_splits , num_heads , hidden_size ):
    """Reorder a fused QKV weight/bias from Megatron's layout to the layout
    transformers expects, preserving the original tensor shape.

    Megatron checkpoint version 1.0 stores [num_heads * hidden_size * num_splits, :];
    version >= 2.0 stores [num_heads * num_splits * hidden_size, :]. Other
    versions are returned with only a reshape (no reordering).
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 2 )
        param = param.transpose(1 , 2 ).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape )
        param = param.transpose(0 , 1 ).contiguous()
    param = param.view(*input_shape )
    return param


# Original name: used by convert_megatron_checkpoint() below.
fix_query_key_value_ordering = A
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
    """Convert a raw Megatron-LM GPT-2 checkpoint state dict into a transformers
    GPT-2 state dict, using the supplied config for the target dimensions.

    NOTE(review): the three positional parameters are all spelled
    `_lowerCamelCase` (duplicate parameter names — a SyntaxError as written) and
    most locals collapsed to `_lowerCAmelCase`; the names referenced on the
    right-hand sides (`input_state_dict`, `config`, `ds_args`, ...) show the
    intended bindings. Confirm against the upstream conversion script.
    """
    _lowerCAmelCase : str = {}
    # old versions did not store training args
    _lowerCAmelCase : Optional[Any] = input_state_dict.get("args" , _lowerCamelCase )
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        _lowerCAmelCase : List[Any] = ds_args.padded_vocab_size
        _lowerCAmelCase : Dict = ds_args.max_position_embeddings
        _lowerCAmelCase : Any = ds_args.hidden_size
        _lowerCAmelCase : List[Any] = ds_args.num_layers
        _lowerCAmelCase : Any = ds_args.num_attention_heads
        _lowerCAmelCase : str = ds_args.ffn_hidden_size
    # pprint(config)
    # The number of heads.
    _lowerCAmelCase : int = config.n_head
    # The hidden_size per head.
    _lowerCAmelCase : List[Any] = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        _lowerCAmelCase : Tuple = input_state_dict["checkpoint_version"]
    else:
        _lowerCAmelCase : List[Any] = 0.0
    # The model.
    _lowerCAmelCase : Tuple = input_state_dict["model"]
    # The language model.
    _lowerCAmelCase : List[Any] = model["language_model"]
    # The embeddings.
    _lowerCAmelCase : Any = lm["embedding"]
    # The word embeddings.
    _lowerCAmelCase : Union[str, Any] = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    _lowerCAmelCase : Optional[int] = word_embeddings[: config.vocab_size, :]
    _lowerCAmelCase : int = word_embeddings
    # The position embeddings.
    _lowerCAmelCase : Any = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    _lowerCAmelCase : List[str] = pos_embeddings.size(0 )
    if n_positions != config.n_positions:
        raise ValueError(
            F"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" )
    # Store the position embeddings.
    _lowerCAmelCase : int = pos_embeddings
    # The transformer.
    _lowerCAmelCase : int = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    _lowerCAmelCase : Optional[int] = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
    # The simple map of names for "automated" rules.
    _lowerCAmelCase : int = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        _lowerCAmelCase : Optional[Any] = layer_re.match(_lowerCamelCase )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        _lowerCAmelCase : Optional[Any] = int(m.group(1 ) )
        # The name of the operation.
        _lowerCAmelCase : List[str] = m.group(2 )
        # Is it a weight or a bias?
        _lowerCAmelCase : List[Any] = m.group(3 )
        # The name of the layer.
        _lowerCAmelCase : str = F"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm" ):
            _lowerCAmelCase : Optional[Any] = "ln_1" if op_name.startswith("input" ) else "ln_2"
            _lowerCAmelCase : List[Any] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            _lowerCAmelCase : Any = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
                1 , 1 , _lowerCamelCase , _lowerCamelCase )
            _lowerCAmelCase : Dict = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            _lowerCAmelCase : Tuple = torch.tensor(-1e4 , dtype=torch.floataa )
            _lowerCAmelCase : Tuple = masked_bias
            _lowerCAmelCase : List[Any] = fix_query_key_value_ordering(_lowerCamelCase , _lowerCamelCase , 3 , _lowerCamelCase , _lowerCamelCase )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            _lowerCAmelCase : Dict = out_val.transpose(0 , 1 ).contiguous()
            # Store.
            _lowerCAmelCase : List[Any] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            _lowerCAmelCase : Optional[int] = fix_query_key_value_ordering(_lowerCamelCase , _lowerCamelCase , 3 , _lowerCamelCase , _lowerCamelCase )
            # Store. No change of shape.
            _lowerCAmelCase : Tuple = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            _lowerCAmelCase : Any = megatron_to_transformers[op_name]
            _lowerCAmelCase : str = val.transpose(0 , 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            _lowerCAmelCase : Tuple = megatron_to_transformers[op_name]
            _lowerCAmelCase : Dict = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    _lowerCAmelCase : Dict = transformer["final_layernorm.weight"]
    _lowerCAmelCase : List[str] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    _lowerCAmelCase : Union[str, Any] = word_embeddings
    # It should be done!
    return output_state_dict
def A ( ):
    """Command-line entry point: load a Megatron-LM GPT-2 checkpoint (.zip or
    plain .pt), build or auto-detect a GPT-2 config, convert the weights, and
    save config, tokenizer files and `pytorch_model.bin` next to the input.

    NOTE(review): most locals here collapsed to `_lowerCAmelCase`; the
    right-hand sides and later references (`args`, `parser`, `ds_args`,
    `config`, ...) show the intended names — confirm against the upstream
    script before running.
    """
    _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure" , action="store_true" )
    parser.add_argument(
        "path_to_checkpoint" , type=_lowerCamelCase , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
    parser.add_argument(
        "--config_file" , default="" , type=_lowerCamelCase , help="An optional config json file describing the pre-trained model." , )
    _lowerCAmelCase : Union[str, Any] = parser.parse_args()
    # Extract the basename.
    _lowerCAmelCase : List[Any] = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" )
    if args.path_to_checkpoint.endswith(".zip" ):
        with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
                _lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location="cpu" )
    else:
        _lowerCAmelCase : List[Any] = torch.load(args.path_to_checkpoint , map_location="cpu" )
    _lowerCAmelCase : Dict = input_state_dict.get("args" , _lowerCamelCase )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                _lowerCAmelCase : Union[str, Any] = "gelu_fast"
            elif ds_args.openai_gelu:
                _lowerCAmelCase : int = "gelu_new"
            else:
                _lowerCAmelCase : Dict = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            _lowerCAmelCase : Dict = "gelu_new"
        # Spell out all parameters in case the defaults change.
        _lowerCAmelCase : Union[str, Any] = GPTaConfig(
            vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_lowerCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=_lowerCamelCase , summary_activation=_lowerCamelCase , summary_proj_to_labels=_lowerCamelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCamelCase , use_cache=_lowerCamelCase , bos_token_id=50_256 , eos_token_id=50_256 , )
    else:
        _lowerCAmelCase : str = GPTaConfig.from_json_file(args.config_file )
    _lowerCAmelCase : Dict = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting" )
    _lowerCAmelCase : Tuple = convert_megatron_checkpoint(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(_lowerCamelCase , _lowerCamelCase )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        _lowerCAmelCase : Optional[int] = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            _lowerCAmelCase : Optional[int] = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            _lowerCAmelCase : Union[str, Any] = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F"Unrecognized tokenizer_type {tokenizer_type}" )
    else:
        _lowerCAmelCase : str = "gpt2"
    _lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_lowerCamelCase )
    _lowerCAmelCase : Dict = type(_lowerCamelCase ).__name__
    _lowerCAmelCase : List[str] = tokenizer_class
    # Store the config to file.
    print("Saving config" )
    config.save_pretrained(_lowerCamelCase )
    # Save tokenizer based on args
    print(F"Adding {tokenizer_class} tokenizer files" )
    tokenizer.save_pretrained(_lowerCamelCase )
    # Store the state_dict to file.
    _lowerCAmelCase : List[str] = os.path.join(_lowerCamelCase , "pytorch_model.bin" )
    print(F"Saving checkpoint to \"{output_checkpoint_file}\"" )
    torch.save(_lowerCamelCase , _lowerCamelCase )
####################################################################################################
if __name__ == "__main__":
    # NOTE(review): `main` is expected to be the CLI entry point defined above
    # (currently bound to the name `A`) — confirm the binding before running.
    main()
####################################################################################################
| 36 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Mapping from checkpoint short name to the official OpenAI download URL.
lowerCAmelCase_ = {
    '''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
    '''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
    '''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
    '''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
    '''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
    '''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
    '''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
    '''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
    '''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
    '''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
# Stable alias used by `_download` below; `lowerCAmelCase_` is rebound later.
_MODELS = lowerCAmelCase_
def lowerCamelCase_ ( state_dict ):
    """Remove top-level keys ('layers', 'blocks') from *state_dict* in place.

    These keys are never mapped to HF parameter names, so they must be
    dropped before renaming. Missing keys are ignored.
    """
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k , None )

# Original name used by the converter below.
remove_ignore_keys_ = lowerCamelCase_
# Substring-level renaming rules from OpenAI Whisper parameter names to
# transformers WhisperForConditionalGeneration names.
lowerCAmelCase_ = {
    '''blocks''': '''layers''',
    '''mlp.0''': '''fc1''',
    '''mlp.2''': '''fc2''',
    '''mlp_ln''': '''final_layer_norm''',
    '''.attn.query''': '''.self_attn.q_proj''',
    '''.attn.key''': '''.self_attn.k_proj''',
    '''.attn.value''': '''.self_attn.v_proj''',
    '''.attn_ln''': '''.self_attn_layer_norm''',
    '''.attn.out''': '''.self_attn.out_proj''',
    '''.cross_attn.query''': '''.encoder_attn.q_proj''',
    '''.cross_attn.key''': '''.encoder_attn.k_proj''',
    '''.cross_attn.value''': '''.encoder_attn.v_proj''',
    '''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
    '''.cross_attn.out''': '''.encoder_attn.out_proj''',
    '''decoder.ln.''': '''decoder.layer_norm.''',
    '''encoder.ln.''': '''encoder.layer_norm.''',
    '''token_embedding''': '''embed_tokens''',
    '''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
    '''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
    '''ln_post''': '''layer_norm''',
}
# Stable alias used by `rename_keys` below; `lowerCAmelCase_` is rebound later.
WHISPER_MAPPING = lowerCAmelCase_
def lowerCamelCase_ ( s_dict ):
    """Rename every key of *s_dict* from Whisper naming to HF naming using
    WHISPER_MAPPING (in place; the same dict is also returned).

    Every mapping entry whose key occurs as a substring of the original key
    is applied, so several rules may fire for one parameter name.
    """
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f'''{key} -> {new_key}''' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict

# Original name used by the converter below.
rename_keys = lowerCamelCase_
def lowerCamelCase_ ( emb ):
    """Build a bias-free nn.Linear whose weight shares storage with *emb*.

    Used to tie the LM head to the decoder token embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share the embedding's storage instead of copying it.
    lin_layer.weight.data = emb.weight.data
    return lin_layer

# Original name used by the converter below.
make_linear_from_emb = lowerCamelCase_
def lowerCamelCase_ ( url , root=None ) -> bytes:
    """Download *url* into *root*, verify its SHA256 checksum (embedded in the
    URL path), and return the raw model bytes. Re-downloads on mismatch.

    `root` defaults to ~/.cache/whisper — the caller below passes only the URL.
    """
    if root is None:
        # Default matches openai/whisper's cache location.
        root = os.path.join(os.path.expanduser('''~''' ) , '''.cache''' , '''whisper''' )
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    # The second-to-last URL path segment is the expected SHA256 hex digest.
    expected_shaaaa = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )
    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )
    return model_bytes

# Original name used by the converter below.
_download = lowerCamelCase_
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int:
    """Convert an OpenAI Whisper checkpoint (model name or local .pt path) into
    a transformers WhisperForConditionalGeneration and save it to the dump
    folder, tying the LM head to the decoder embedding.

    NOTE(review): both parameters are spelled `_UpperCamelCase` (checkpoint
    path and dump folder — duplicate parameter names are a SyntaxError as
    written) and locals collapsed to `snake_case_`; the right-hand-side names
    show the intended bindings. Confirm against the upstream script.
    """
    if ".pt" not in checkpoint_path:
        snake_case_ : str = _download(_MODELS[checkpoint_path] )
    else:
        snake_case_ : Union[str, Any] = torch.load(_UpperCamelCase , map_location='''cpu''' )
    snake_case_ : int = original_checkpoint['''dims''']
    snake_case_ : List[str] = original_checkpoint['''model_state_dict''']
    snake_case_ : str = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(_UpperCamelCase )
    rename_keys(_UpperCamelCase )
    snake_case_ : Optional[int] = True
    snake_case_ : int = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    snake_case_ : List[str] = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_UpperCamelCase , decoder_ffn_dim=_UpperCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    snake_case_ : Union[str, Any] = WhisperForConditionalGeneration(_UpperCamelCase )
    snake_case_ , snake_case_ : List[Any] = model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
    # Only positional-embedding weights may legitimately be missing.
    if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )
    if tie_embeds:
        snake_case_ : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case_ : Any = proj_out_weights
    model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point; `parser`/`args` must keep these names — they are used below.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 | 0 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def SCREAMING_SNAKE_CASE__ ( x: float , y: float , max_step: int )-> float:
    """Return the normalised step (0..1) at which c = x + y*i diverges under
    the Mandelbrot iteration z -> z^2 + c; 1.0 means it never diverged.
    """
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)

# Original name used by get_image() below.
get_distance = SCREAMING_SNAKE_CASE__
def SCREAMING_SNAKE_CASE__ ( distance: float )-> tuple:
    """Black for points inside the set (distance == 1), white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)

# Original name used by get_image() below.
get_black_and_white_rgb = SCREAMING_SNAKE_CASE__
def SCREAMING_SNAKE_CASE__ ( distance: float )-> tuple:
    """Map a divergence distance to an RGB colour on the HSV hue wheel;
    points inside the set (distance == 1) are black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )

# Original name used by get_image() below.
get_color_coded_rgb = SCREAMING_SNAKE_CASE__
def SCREAMING_SNAKE_CASE__ ( image_width: int = 800 , image_height: int = 600 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 50 , use_distance_color_coding: bool = True , )-> "Image.Image":
    """Render the Mandelbrot set as a PIL image.

    The figure rectangle is centred on (figure_center_x, figure_center_y)
    with width figure_width; height follows the image aspect ratio.
    """
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # figure_height is loop-invariant: it only preserves the aspect ratio.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img

# Original name used by the __main__ demo below.
get_image = SCREAMING_SNAKE_CASE__
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure — the result must be bound to `img`,
    # which is displayed at the end of this block.
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 298 |
"""simple docstring"""
import functools
def SCREAMING_SNAKE_CASE__ ( worda: str , wordb: str )-> int:
    """Return the Levenshtein edit distance between *worda* and *wordb*
    using memoised top-down dynamic programming.
    """
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa: int , indexb: int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] ) # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 298 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
# Stable aliases for the module constants: `a_` is rebound after every
# assignment below, so the tokenizer class in this module must reference
# these names instead of `a_`.
logger = a_
a_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
VOCAB_FILES_NAMES = a_
a_ = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}
PRETRAINED_VOCAB_FILES_MAP = a_
a_ = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = a_
a_ = '▁'
SPIECE_UNDERLINE = a_
# Segments (not really needed)
a_ = 0
SEG_ID_A = a_
a_ = 1
SEG_ID_B = a_
a_ = 2
SEG_ID_CLS = a_
a_ = 3
SEG_ID_SEP = a_
a_ = 4
SEG_ID_PAD = a_
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
    """Fast (tokenizers-backed) XLNet tokenizer.

    Pads on the left and mirrors the slow `XLNetTokenizer`; the slow
    sentencepiece vocabulary can only be written back when the original
    vocab file is available (`can_save_slow_tokenizer`).
    """
    snake_case_ = VOCAB_FILES_NAMES
    snake_case_ = PRETRAINED_VOCAB_FILES_MAP
    snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case_ = """left"""
    snake_case_ = XLNetTokenizer
    def __init__( self : Optional[Any] , __lowercase : List[Any]=None , __lowercase : Tuple=None , __lowercase : int=False , __lowercase : List[str]=True , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]="<s>" , __lowercase : Any="</s>" , __lowercase : int="<unk>" , __lowercase : Optional[int]="<sep>" , __lowercase : str="<pad>" , __lowercase : Optional[Any]="<cls>" , __lowercase : Dict="<mask>" , __lowercase : List[Any]=["<eop>", "<eod>"] , **__lowercase : Any , ) -> List[Any]:
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token
        super().__init__(
            vocab_file=__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , additional_special_tokens=__lowercase , **__lowercase , )
        SCREAMING_SNAKE_CASE__ : int =3
        SCREAMING_SNAKE_CASE__ : Any =do_lower_case
        SCREAMING_SNAKE_CASE__ : Optional[Any] =remove_space
        SCREAMING_SNAKE_CASE__ : Optional[int] =keep_accents
        SCREAMING_SNAKE_CASE__ : Tuple =vocab_file
        # Slow-vocab export is only possible when the sentencepiece file exists.
        SCREAMING_SNAKE_CASE__ : int =False if not self.vocab_file else True
    def __magic_name__ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
        """Build model inputs: XLNet uses `tokens <sep> (tokens_2 <sep>) <cls>`."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =[self.sep_token_id]
        SCREAMING_SNAKE_CASE__ : Optional[int] =[self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def __magic_name__ ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
        """Create token-type ids; the trailing CLS gets its own segment id (2)."""
        SCREAMING_SNAKE_CASE__ : List[Any] =[self.sep_token_id]
        SCREAMING_SNAKE_CASE__ : List[Any] =[2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]:
        """Copy the slow sentencepiece vocabulary into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__lowercase ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.join(
            __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
            copyfile(self.vocab_file , __lowercase )
        return (out_vocab_file,)
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
a_ = re.compile(R'\s+')
def _a( example ):
    '''Compute the md5 hash of the file content with all whitespace removed.'''
    # Uses the raw pattern instead of `a_` because `a_` is rebound later in
    # this script; `re` caches the compiled pattern, so this stays cheap.
    return {"hash": hashlib.md5(re.sub(R'\s+', '''''', example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}

# Original name used by preprocess() below.
get_hash = _a
def _a( example ):
    '''Mean and maximum line length of the file content.'''
    line_lengths = [len(line ) for line in example['''content'''].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}

# Original name used by preprocess() below.
line_stats = _a
def _a( example ):
    '''Fraction of alphanumeric characters in the file content.'''
    alpha_frac = np.mean([c.isalnum() for c in example['''content''']] )
    return {"alpha_frac": alpha_frac}

# Original name used by preprocess() below.
alpha_stats = _a
def _a( example, uniques ):
    '''True iff the example's hash is still in *uniques*; the hash is removed
    so any later duplicate is rejected.'''
    if example["hash"] in uniques:
        uniques.remove(example['''hash'''] )
        return True
    else:
        return False

# Original name used by the filter() function below.
check_uniques = _a
def _a( example, scan_width=5 ):
    '''Scan the first *scan_width* lines for auto-generation markers.'''
    keywords = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
    lines = example['''content'''].splitlines()
    for _, line in zip(range(scan_width ), lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}

# Original name used by preprocess() below.
is_autogenerated = _a
def _a( example, scan_width=5, coeff=0.0_5 ):
    '''Heuristically flag configuration files and test files: either an explicit
    keyword in the first *scan_width* lines, or a high density of
    "config"/"test" occurrences relative to the number of lines.'''
    keywords = ['''unit tests''', '''test file''', '''configuration file''']
    lines = example['''content'''].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ), lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['''content'''].count('''\n''' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('''config''' )
        count_test += line.lower().count('''test''' )
    if count_config > threshold or count_test > threshold:
        return {"config_or_test": True}
    return {"config_or_test": False}

# Original name used by preprocess() below.
is_config_or_test = _a
def _a( example ):
    '''True when the file contains no function/class/loop keywords (likely not real code).'''
    keywords = ['''def ''', '''class ''', '''for ''', '''while ''']
    lines = example['''content'''].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}

# Original name used by preprocess() below.
has_no_keywords = _a
def _a( example, minimum=4 ):
    '''True when the file contains at most *minimum* '=' characters in total.'''
    lines = example['''content'''].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('''=''' )
    # The check runs after the full count, matching the original control flow.
    if counter > minimum:
        return {"has_few_assignments": False}
    return {"has_few_assignments": True}

# Original name used by preprocess() below.
has_few_assignments = _a
def char_token_ratio(example):
    """Compute the character/token ratio of the file content.

    Relies on the module-level `tokenizer` global (loaded from
    `args.tokenizer_dir` further down in this script).

    Args:
        example: dict with a "content" key holding the file text.

    Returns:
        dict with a single float entry under the key "ratio".
    """
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the dataset
    cache is filled only once.

    Args:
        example: dict with a "content" key holding the file text.

    Returns:
        dict merging the outputs of all per-example statistics/heuristics.
    """
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):  # noqa: A001 - name must match the `ds.filter(filter, ...)` call site
    """Filter dataset rows with heuristics.

    Config/test files and keyword-free files are dropped with probability
    `args.filter_proba`; duplicates, autogenerated files and files outside
    the line/alpha/token-ratio thresholds are dropped deterministically.

    Args:
        example: preprocessed row (output of `preprocess` + hash checks).
        uniques: set of hashes still considered unique (mutated by `check_uniques`).
        args: parsed `PreprocessingArguments` namespace with the thresholds.

    Returns:
        True if the example should be kept, False otherwise.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress `file_path` with gzip (level 6) to `file_path + ".gz"` and
    delete the original file.

    Args:
        file_path: path (str or Path) of the file to compress.
    """
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
'''simple docstring'''
def solution(n=600851475143):
    """Return the largest prime factor of `n` by trial division.

    Divides out each factor i (starting at 2) completely before moving on,
    so only primes ever divide the shrinking `n`; whatever remains above 1
    at the end is itself the largest prime factor.

    Args:
        n: integer (or value castable to int) to factorize; must be >= 1.

    Returns:
        The largest prime factor of n, as an int.

    Raises:
        TypeError: if n cannot be cast to int.
        ValueError: if n is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    largest = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            largest = i
            n //= i
        i += 1
    # any leftover n > 1 is prime and larger than every factor found so far
    if n > 1:
        largest = n
    return int(largest)


if __name__ == "__main__":
    print(f"{solution() = }")
| 332 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """Dummy placeholder raising a helpful error when `note_seq` is not installed.

    NOTE(review): the obfuscated original used the undefined name
    `__lowerCAmelCase` as metaclass; `DummyObject` is what this module imports,
    and the class name is restored from the diffusers note_seq dummy — confirm
    against the upstream `dummy_note_seq_objects` module.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 332 | 1 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
_a = get_logger(__name__)
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str=None ):
__lowercase = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self, UpperCAmelCase__, getattr(UpperCAmelCase__, UpperCAmelCase__ ) )
__lowercase = module._original_module if isinstance(UpperCAmelCase__, _PatchedModuleObj ) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules
    intact at their original state.

    Usable as a context manager (`with patch_submodule(obj, "os.path.join", new): ...`)
    or imperatively via `start()` / `stop()`.
    """

    _active_patches = []

    def __init__(self, obj, target, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it'a s builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        # restore every patched attribute to its remembered original
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 17 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 99 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration class for Wav2Vec2 models.

    NOTE(review): restored from the obfuscated original, whose `__init__` had
    every parameter named `lowerCamelCase__` (a SyntaxError) and whose
    assignments dropped `self.`; parameter names/order were reconstructed from
    the default values and the attribute names used in the body — confirm
    against the upstream transformers `Wav2Vec2Config`.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Overall stride of the convolutional feature encoder (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 322 |
'''simple docstring'''
def is_power_of_two(number):
    """Return True if `number` is a power of two, using the `n & (n - 1)` bit trick.

    Note: like the original bit-trick implementation, this returns True for 0.

    Args:
        number: non-negative integer to test.

    Raises:
        ValueError: if `number` is negative.
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 322 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
A: Any = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw speech into Mel filterbank (MFSC) features.

    NOTE(review): restored from an obfuscated original in which the three
    helper methods all shared one name and every `__init__` assignment lost
    its `self.` prefix; names reconstructed from the `self._extract_mfsc_features`
    / `self._normalize_one` call sites visible in the body — confirm against
    the upstream transformers `MCTCTFeatureExtractor`.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        """`win_length`/`hop_length` are given in milliseconds and converted to samples below."""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # convert milliseconds to samples
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform):
        """Extract MFSC features for one (unbatched) waveform; returns frames x feature_size."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature matrix over its valid length, then re-pad."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Normalize a batch of feature matrices, using the attention mask for valid lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        sampling_rate=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize and pad one waveform or a batch of waveforms into a `BatchFeature`."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # only use the mask for normalization when padding was actually applied
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 109 |
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a list, with wrap-around indices."""

    def __init__(self, n):
        self.n = n  # capacity
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0  # index where the next element will be written
        self.size = 0

    def __len__(self):
        """Number of elements currently stored."""
        return self.size

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return self.size == 0

    def first(self):
        """Return the front element without removing it, or False when empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append `data` at the rear; raises when the queue is full. Returns self."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raises on an empty queue."""
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 90 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowerCAmelCase__ : Any = True
except (ImportError, ModuleNotFoundError):
lowerCAmelCase__ : Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    """Split `x` into sentences with nltk and re-join them newline-separated.

    This was added to get rougeLsum scores matching published rougeL scores
    for BART and PEGASUS. Requires nltk (module-level `NLTK_AVAILABLE` flag).

    NOTE(review): the original called `re.sub` without binding the result, so
    the pegasus "<n>" marker was never actually removed; fixed here to match
    the intent stated in the original comment.
    """
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 371 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
lowerCAmelCase__ : int = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    """Configuration class for FNet models.

    NOTE(review): restored from an obfuscated original whose `__init__`
    parameters were all named `UpperCAmelCase_` and whose assignments dropped
    `self.`; parameter names/order reconstructed from the defaults and the
    attribute names in the body — confirm against upstream `FNetConfig`.
    """

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 37 | 0 |
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = (DDIMParallelScheduler,)
_lowerCAmelCase : List[str] = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def snake_case ( self , **lowerCAmelCase ):
"""simple docstring"""
snake_case = {
'num_train_timesteps': 10_00,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase )
return config
def snake_case ( self , **lowerCAmelCase ):
"""simple docstring"""
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config(**lowerCAmelCase )
snake_case = scheduler_class(**lowerCAmelCase )
snake_case ,snake_case = 10, 0.0
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for t in scheduler.timesteps:
snake_case = model(lowerCAmelCase , lowerCAmelCase )
snake_case = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def snake_case ( self ):
"""simple docstring"""
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase )
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config(steps_offset=1 )
snake_case = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def snake_case ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def snake_case ( self ):
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5
def snake_case ( self ):
"""simple docstring"""
snake_case = self.scheduler_classes[0]
snake_case = self.get_scheduler_config()
snake_case = scheduler_class(**lowerCAmelCase )
snake_case ,snake_case = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase )
snake_case = self.dummy_model()
snake_case = self.dummy_sample_deter
snake_case = self.dummy_sample_deter + 0.1
snake_case = self.dummy_sample_deter - 0.1
snake_case = samplea.shape[0]
snake_case = torch.stack([samplea, samplea, samplea] , dim=0 )
snake_case = torch.arange(lowerCAmelCase )[0:3, None].repeat(1 , lowerCAmelCase )
snake_case = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
snake_case = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase )
snake_case = torch.sum(torch.abs(lowerCAmelCase ) )
snake_case = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
assert abs(result_mean.item() - 0.49_82 ) < 1E-3
def snake_case ( self ):
"""simple docstring"""
snake_case = self.full_loop()
snake_case = torch.sum(torch.abs(lowerCAmelCase ) )
snake_case = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
def snake_case ( self ):
"""simple docstring"""
snake_case = self.full_loop(prediction_type='v_prediction' )
snake_case = torch.sum(torch.abs(lowerCAmelCase ) )
snake_case = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1E-2
assert abs(result_mean.item() - 0.06_84 ) < 1E-3
def snake_case ( self ):
"""simple docstring"""
snake_case = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
snake_case = torch.sum(torch.abs(lowerCAmelCase ) )
snake_case = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
assert abs(result_mean.item() - 0.19_51 ) < 1E-3
def test_full_loop_with_no_set_alpha_to_one(self):
    """Full loop with ``set_alpha_to_one=False`` (counterpart of the test above).

    ``False`` matches the 149.0784 reference value in the upstream diffusers
    test — confirm against that suite.
    """
    sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 149.0784) < 1e-2
    assert abs(result_mean.item() - 0.1941) < 1e-3
| 150 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    """Configuration for BERT-generation encoder/decoder models.

    The mangled original gave every ``__init__`` parameter the same name
    (a SyntaxError) and inherited from an undefined base; the parameter
    names are restored from the attribute assignments in the body, and the
    base is the ``PretrainedConfig`` imported above.
    """

    # Identifier used by the Auto* config machinery.
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Store the model hyper-parameters and forward token ids to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 150 | 1 |
'''simple docstring'''
import os
import sys
import unittest
# Make the repo-root "utils" directory importable so `get_test_info` resolves.
# The mangled original assigned the path to one name but read `git_repo_path`.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Paths of the two model test files introspected below (the originals were
# both assigned to the same name, so the first was silently lost).
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    """Unit tests for the ``get_test_info`` helpers mapping model classes to
    their test and tester classes.

    The file paths are rebuilt as class attributes so this class is
    self-contained (the mangled original read undefined module globals).
    """

    bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")

    def test_get_test_to_tester_mapping(self):
        """Each *ModelTest class maps to exactly one *ModelTester class."""
        bert_test_tester_mapping = get_test_to_tester_mapping(self.bert_test_file)
        blip_test_tester_mapping = get_test_to_tester_mapping(self.blip_test_file)
        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        """Each model class maps to the list of test classes exercising it."""
        bert_model_test_mapping = get_model_to_test_mapping(self.bert_test_file)
        blip_model_test_mapping = get_model_to_test_mapping(self.blip_test_file)
        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        """Each model class maps to the list of tester classes backing its tests."""
        bert_model_tester_mapping = get_model_to_tester_mapping(self.bert_test_file)
        blip_model_tester_mapping = get_model_to_tester_mapping(self.blip_test_file)
        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 352 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of *function* inside the interval [a, b] via bisection.

    Args:
        function: continuous function evaluated at candidate points.
        a: left end of the search interval.
        b: right end of the search interval.

    Returns:
        A point where ``function`` is (approximately) zero, to ~1e-7 precision.

    Raises:
        ValueError: if ``function(a)`` and ``function(b)`` have the same
            nonzero sign (no sign change, so bisection cannot apply).
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                # sign change is between start and mid -> shrink from the right
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    """Example cubic used by the demo in ``__main__``: f(x) = x**3 - 2x - 5 (root ~= 2.0945515)."""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    # Demo: print a root of ``f`` bracketed by [1, 1000], then run doctests.
    # NOTE(review): this calls ``bisection`` and ``f`` — confirm those are the
    # names of the two definitions above (the mangled originals differ).
    print(bisection(f, 1, 10_00))

    import doctest

    doctest.testmod()
| 37 | 0 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """Acquiring an already-held lock must raise ``Timeout`` after ~`timeout` seconds.

    ``tmpdir`` is the pytest-provided temporary directory fixture; the mangled
    original named the parameter differently and read undefined identifiers.
    """
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        # The failed acquire must have waited at least the requested timeout.
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """Lock-file names longer than the OS limit are shortened to <= 255 characters.

    ``tmpdir`` is the pytest temporary-directory fixture.
    """
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    # The shortened name keeps the .lock suffix but not the oversized stem.
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
| 173 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Two unsorted sample sequences used by the demo in ``__main__``.
# The originals were both assigned to the same mangled name (second clobbered
# the first) while ``__main__`` reads ``test_data_odd`` / ``test_data_even``.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    """A single node of a singly linked list (name restored: the list class
    below instantiates ``Node``)."""

    # Stored integer value.
    data: int
    # Next node in the chain, or None at the tail.
    next_node: Node | None
class SortedLinkedList:
    """Singly linked list that stores its integers in ascending order.

    Name restored: the ``__main__`` block references ``SortedLinkedList``.
    """

    def __init__(self, ints: Iterable[int]) -> None:
        """Build the list by inserting values largest-first at the head,
        so the resulting chain is ascending."""
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored values in ascending order."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Number of stored values."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Render as e.g. ``"1 -> 2 -> 3"``."""
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list.

    Name restored: ``__main__`` calls ``merge_lists``.
    """
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Short alias for the demo below; the mangled original assigned the alias
    # to a different name than the one it then read.
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 340 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Mapping from checkpoint name to its config URL on the Hugging Face Hub.
# (In the mangled original both assignments used the same name, so the
# archive map silently clobbered the logger.)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for ConvNeXt V2 models (also usable as a backbone).

    The mangled original gave every ``__init__`` parameter the same name
    (a SyntaxError) and inherited from undefined bases; the parameter names
    are restored from the attribute assignments, and the bases are the two
    classes imported at the top of the file.
    """

    model_type = 'convnextv2'

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Store hyper-parameters; ``hidden_sizes``/``depths`` default to the
        tiny-model layout when not provided."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Stage names used by the backbone utilities ("stem", "stage1", ...).
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 177 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public names it provides.
# The mangled original reassigned one variable in every availability branch
# (clobbering the dict each time) and finally referenced an undefined
# `_import_structure`; the standard transformers __init__ pattern is restored.
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roberta'] = [
        'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaForCausalLM',
        'RobertaForMaskedLM',
        'RobertaForMultipleChoice',
        'RobertaForQuestionAnswering',
        'RobertaForSequenceClassification',
        'RobertaForTokenClassification',
        'RobertaModel',
        'RobertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_roberta'] = [
        'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaForCausalLM',
        'TFRobertaForMaskedLM',
        'TFRobertaForMultipleChoice',
        'TFRobertaForQuestionAnswering',
        'TFRobertaForSequenceClassification',
        'TFRobertaForTokenClassification',
        'TFRobertaMainLayer',
        'TFRobertaModel',
        'TFRobertaPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_roberta'] = [
        'FlaxRobertaForCausalLM',
        'FlaxRobertaForMaskedLM',
        'FlaxRobertaForMultipleChoice',
        'FlaxRobertaForQuestionAnswering',
        'FlaxRobertaForSequenceClassification',
        'FlaxRobertaForTokenClassification',
        'FlaxRobertaModel',
        'FlaxRobertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static mirror of the lazy structure so type checkers see real imports.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 177 | 1 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """`_distribute_shards` splits num_shards into at most max_num_jobs contiguous ranges.

    The mangled original had duplicate parameter names (a SyntaxError) and
    ``range(_lowercase, i + 1)`` instead of ``range(i, i + 1)``.
    """
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """`_split_gen_kwargs` shards the "shards" list across at most max_num_jobs jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Shard count is the length of the list-valued kwarg; mismatched list
    lengths must raise ``RuntimeError``."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 105 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers wrapped around lines the converter cannot translate automatically.
# (The mangled original collapsed four distinct constants into one name.)
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""

# TFDS constructs with no datasets equivalent: flag them for manual review.
TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def convert_command_factory(args: Namespace):
    """Factory passed to ``set_defaults(func=...)``: build a ConvertCommand
    from the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    """`datasets-cli convert`: translate a TensorFlow Datasets script into a
    Hugging Face Datasets script.

    The mangled original named both methods ``__a`` (one clobbered the other)
    and gave ``__init__`` duplicate parameter names (a SyntaxError); the
    ``BaseDatasetsCLICommand`` interface names are restored.
    """

    @staticmethod
    def register_subcommand(parser):
        """Attach the `convert` sub-command and its two required arguments."""
        train_parser = parser.add_parser(
            "convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
        train_parser.add_argument(
            "--tfds_path", type=str, required=True, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path, datasets_directory, *args):
        """Store the source tfds path and destination datasets directory."""
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible .py file under `_tfds_path`, rewriting TFDS
        idioms to datasets idioms and copying shared utils next to builders."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"""Looking at file {f_name}""")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Constructs we can't translate: keep the line but flag it.
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    out_lines.append(out_line)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"""Error converting {out_line.strip()}""")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"""Adding directory {output_dir}""")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"""Converted in {output_file}""")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"""Moving {dest_folder} to {utils_file}""")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"""Cannot find destination folder for {utils_file}. Please copy manually.""")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""")
| 105 | 1 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular (cyclic) convolution of two discrete signals via the
    circulant-matrix method.

    The mangled original assigned every local to the same name and dropped
    the rotate argument; meaningful names are restored here.
    """

    def __init__(self):
        # Example signals; callers may overwrite these before convolving.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        """Return the circular convolution of the two signals, rounded to 2 dp."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # Row i of the circulant matrix is the second signal rotated right by i.
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 40 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

# Names of the vocabulary files a slow BPE tokenizer saves/loads.
# (The mangled original assigned all five constants to the same name.)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
@lru_cache()
def bytes_to_unicode():
    """Return a dict mapping every byte value (0-255) to a printable unicode character.

    Printable latin-1 bytes map to themselves; the remaining (whitespace/control)
    bytes are remapped to code points starting at 256, so BPE never has to merge
    across unprintable characters.
    """
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED (same scheme as BART/GPT-2).

    The mangled original gave duplicate parameter names (a SyntaxError) and
    collapsed every method name; the standard slow-tokenizer method names
    required by ``PreTrainedTokenizer`` are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the vocab/merges files and set up the byte-level BPE machinery."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply (cached) BPE merges to a single pre-token and return the
        space-joined merged symbols."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split *text* with the GPT-2 pre-tokenization regex and BPE each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (falls back to the unk token's id)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level mapping back to text."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write ``vocab.json`` and ``merges.txt`` into *save_directory* and
        return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """1 where a special token sits, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """LED (like RoBERTa) does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like any other."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        """Pad as the base class does, then extend ``global_attention_mask``
        with ``-1`` entries so it matches the padded input length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
| 40 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.