| column | type | range |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]  # drop the final avgpool and fc layers
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
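For orientation, a minimal sketch of how these pieces fit together in a training loop; the tokenizer checkpoint and the `train.jsonl` path are illustrative assumptions, not part of the original script:

```python
# Hypothetical wiring of the dataset and collate function (checkpoint and path are assumptions).
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
labels = get_mmimdb_labels()
dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)

text, mask, image, img_start, img_end, target = next(iter(loader))
print(text.shape, image.shape, target.shape)
```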
---
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
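Beyond the loss score, the same checkpoint can be sanity-checked with generation; a hedged sketch reusing the model and tokenizer above (depending on the installed version, `max_length` may be needed instead of `max_new_tokens`):

```python
# Sketch only: greedy decoding with a small output budget.
ids = tokenizer("Hello there", return_tensors="tf").input_ids
out = model.generate(ids, max_new_tokens=8)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```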
---
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
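A hypothetical invocation of the tool (the image path is an assumption; the BLIP checkpoint is downloaded on first use):

```python
# Sketch only: the tool's __call__ runs encode -> forward -> decode.
from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))
print(caption)
```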
---
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
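A minimal sketch of constructing the config; with no arguments it falls back to the default Swin backbone initialized above:

```python
config = Mask2FormerConfig()
print(config.backbone_config.model_type)  # "swin"
print(config.num_queries)                 # 100
config_dict = config.to_dict()            # backbone config is serialized as a nested dict
```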
---
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
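Outside the test harness, the slow-test checkpoint can be exercised directly; a hedged sketch (weights are downloaded on first run, and a GPU is strongly recommended):

```python
# Sketch only: a few inference steps for a quick smoke test.
import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=5).images[0]
image.save("squirrel.png")
```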
---
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''detr'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int:
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ : Dict = backbone_config.get("model_type" )
UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : str = use_timm_backbone
UpperCAmelCase_ : Optional[Any] = backbone_config
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Dict = num_queries
UpperCAmelCase_ : str = d_model
UpperCAmelCase_ : Any = encoder_ffn_dim
UpperCAmelCase_ : Union[str, Any] = encoder_layers
UpperCAmelCase_ : Optional[int] = encoder_attention_heads
UpperCAmelCase_ : List[str] = decoder_ffn_dim
UpperCAmelCase_ : Tuple = decoder_layers
UpperCAmelCase_ : Optional[int] = decoder_attention_heads
UpperCAmelCase_ : List[Any] = dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : int = activation_dropout
UpperCAmelCase_ : List[str] = activation_function
UpperCAmelCase_ : Optional[int] = init_std
UpperCAmelCase_ : Union[str, Any] = init_xavier_std
UpperCAmelCase_ : List[str] = encoder_layerdrop
UpperCAmelCase_ : Tuple = decoder_layerdrop
UpperCAmelCase_ : str = encoder_layers
UpperCAmelCase_ : Any = auxiliary_loss
UpperCAmelCase_ : Optional[int] = position_embedding_type
UpperCAmelCase_ : List[str] = backbone
UpperCAmelCase_ : int = use_pretrained_backbone
UpperCAmelCase_ : Any = dilation
# Hungarian matcher
UpperCAmelCase_ : str = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : List[str] = mask_loss_coefficient
UpperCAmelCase_ : Dict = dice_loss_coefficient
UpperCAmelCase_ : Any = bbox_loss_coefficient
UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient
UpperCAmelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
return self.encoder_attention_heads
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
return self.d_model
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]:
return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]:
UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
return 1e-5
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return 12
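A small sketch showing the attribute aliasing declared in `attribute_map`: the generic names resolve to DETR-specific fields:

```python
from transformers import DetrConfig

config = DetrConfig()
print(config.hidden_size == config.d_model)                          # True
print(config.num_attention_heads == config.encoder_attention_heads)  # True
```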
---
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation


class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
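For reference, a tiny sketch of the helper under test:

```python
import torch
from diffusers.models.activations import get_activation

act = get_activation("gelu")
print(act(torch.tensor([0.0, 20.0])))  # tensor([ 0., 20.]) (GELU is ~identity far from 0)
```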
---
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
                --data_dir {data_dir} \
                --output_dir {output_dir} \
                --model_name_or_path facebook/rag-sequence-base \
                --model_type rag_sequence \
                --do_train \
                --do_predict \
                --n_val -1 \
                --val_check_interval 1.0 \
                --train_batch_size 2 \
                --eval_batch_size 1 \
                --max_source_length 25 \
                --max_target_length 25 \
                --val_max_target_length 25 \
                --test_max_target_length 25 \
                --label_smoothing 0.1 \
                --dropout 0.1 \
                --attention_dropout 0.1 \
                --weight_decay 0.001 \
                --adam_epsilon 1e-08 \
                --max_grad_norm 0.1 \
                --lr_scheduler polynomial \
                --learning_rate 3e-04 \
                --num_train_epochs 1 \
                --warmup_steps 4 \
                --gradient_accumulation_steps 1 \
                --distributed-port 8787 \
                --use_dummy_dataset 1 \
                --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
---
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
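The `_LazyModule` indirection means the submodules listed in `_import_structure` are only imported on first attribute access; a sketch of the user-visible effect:

```python
# Sketch: importing the package is cheap; the class is resolved from .config on first access.
import transformers.onnx as hf_onnx

print(hf_onnx.OnnxConfig)  # triggers the lazy import of transformers.onnx.config
```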
---
import torch

from diffusers import UnCLIPScheduler

from .test_schedulers import SchedulerCommonTest


class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
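A standalone sketch of stepping the scheduler once outside the test harness (shapes are arbitrary; `fixed_small_log` is the default variance type exercised above):

```python
# Sketch only: a single denoising step with random stand-in tensors.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
sample = torch.randn(1, 3, 32, 32)
residual = torch.randn(1, 3, 32, 32)  # stand-in for a model's noise prediction
out = scheduler.step(residual, 999, sample, generator=torch.manual_seed(0))
print(out.prev_sample.shape)  # torch.Size([1, 3, 32, 32])
```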
---
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class=\"img-container\"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, "  \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
---
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    # Map utf-8 bytes to printable unicode characters, avoiding whitespace/control characters
    # that the BPE code cannot handle.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
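A quick illustration of `get_pairs`, which drives the merge loop in `bpe` below:

```python
print(get_pairs(tuple("lower")))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}  (set order may vary)
```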
class __snake_case (_a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any]="replace" , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : Dict="</s>" , _UpperCAmelCase : List[str]="</s>" , _UpperCAmelCase : Any="<s>" , _UpperCAmelCase : Dict="<unk>" , _UpperCAmelCase : Any="<pad>" , _UpperCAmelCase : Tuple="<mask>" , _UpperCAmelCase : int=False , **_UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
_lowerCAmelCase : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
_lowerCAmelCase : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
_lowerCAmelCase : Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
_lowerCAmelCase : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
_lowerCAmelCase : Any = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Tuple = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase : Dict = json.load(_UpperCAmelCase )
_lowerCAmelCase : Any = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Optional[Any] = errors # how to handle errors in decoding
_lowerCAmelCase : Dict = bytes_to_unicode()
_lowerCAmelCase : Optional[int] = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase : Tuple = merges_handle.read().split("""\n""" )[1:-1]
_lowerCAmelCase : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
_lowerCAmelCase : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : Any = tuple(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = get_pairs(_UpperCAmelCase )
if not pairs:
return token
while True:
_lowerCAmelCase : Optional[int] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : int = 0
while i < len(_UpperCAmelCase ):
try:
_lowerCAmelCase : Optional[int] = word.index(_UpperCAmelCase , _UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : List[str] = j
if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Tuple = tuple(_UpperCAmelCase )
_lowerCAmelCase : Dict = new_word
if len(_UpperCAmelCase ) == 1:
break
else:
_lowerCAmelCase : Union[str, Any] = get_pairs(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = """ """.join(_UpperCAmelCase )
_lowerCAmelCase : Any = word
return word
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for token in re.findall(self.pat , _UpperCAmelCase ):
_lowerCAmelCase : Union[str, Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(""" """ ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = """""".join(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + """\n""" )
_lowerCAmelCase : Dict = 0
with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase : str = token_index
writer.write(""" """.join(_UpperCAmelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
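# --- Hedged illustration (not part of the original file) ---
# A minimal sketch of what the `_pad` override above does: sequence inputs are
# padded as usual, while `global_attention_mask` is extended with `-1` (local
# attention) until its length matches `input_ids`. The token ids below are
# made up for the demonstration.
def _demo_global_attention_padding():
    encoded = {
        "input_ids": [0, 713, 16, 2, 1, 1],     # already padded to length 6
        "global_attention_mask": [1, 0, 0, 0],  # still length 4
    }
    difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
    encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
    assert encoded["global_attention_mask"] == [1, 0, 0, 0, -1, -1]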
| 159
| 1
|
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that a path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that the next vertex is not already in the path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether a path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of the path with the starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
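# --- Hedged usage sketch (not part of the original file) ---
# On this small undirected graph the backtracking search above finds the
# cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0; an empty list means no Hamiltonian cycle
# exists from the chosen start vertex.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 4, 3, 0]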
| 34
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def is_chinese(word: str):
    """Returns 1 if every character of `word` is a CJK character, else 0."""
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collects the multi-character Chinese words appearing in `tokens`."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefixes the continuation characters of whole Chinese words with '##' for whole-word masking."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
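# --- Hedged illustration (not part of the original file) ---
# With bert_tokens ["身", "高", "1", "8", "0"] and the LTP-derived word set
# {"身高"}, the helper above marks the continuation character of the whole word:
#     add_sub_symbol(["身", "高", "1", "8", "0"], {"身高"})
#     -> ["身", "##高", "1", "8", "0"]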
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For every line, computes the positions of '##'-prefixed sub-characters of whole Chinese words."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the Chinese token's position
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 343
| 0
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Converts a TAPAS TensorFlow checkpoint to a PyTorch model for the given task."""
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        # (attribute names below are reconstructed from the reference TAPAS conversion
        # script; the numeric values are taken from the original text)
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
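# --- Hedged usage sketch (not part of the original file; the script name and
# paths are illustrative) ---
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas-wtq-pytorch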
| 234
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    """Computes the LLaMA MLP intermediate size from the hidden size `n`."""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
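# --- Worked example (not part of the original file) ---
# For the 7B model, n = dim = 4096 with ffn_dim_multiplier = 1:
#   int(8 * 4096 / 3) = 10922; (10922 + 255) // 256 = 43; 43 * 256 = 11008,
# which matches INTERMEDIATE_SIZE_MAP["7B"] above.
assert compute_intermediate_size(4096) == INTERMEDIATE_SIZE_MAP["7B"]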
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    """Converts a raw LLaMA checkpoint under `input_base_path` into a sharded Hugging Face model at `model_path`."""
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase = torch.load(os.path.join(_snake_case , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
UpperCAmelCase = [
torch.load(os.path.join(_snake_case , F'''consolidated.{i:02d}.pth''' ) , map_location="""cpu""" )
for i in range(_snake_case )
]
UpperCAmelCase = 0
UpperCAmelCase = {"""weight_map""": {}}
for layer_i in range(_snake_case ):
UpperCAmelCase = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCAmelCase = {
F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wq.weight'''] ),
F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[F'''layers.{layer_i}.attention.wk.weight'''] ),
F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase = {
F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.attention_norm.weight'''
].clone(),
F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
F'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
UpperCAmelCase = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case ) )
UpperCAmelCase = permute(
torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case ) , _snake_case , _snake_case , _snake_case , )
UpperCAmelCase = torch.cat(
[
loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
_snake_case , _snake_case , _snake_case )
for i in range(_snake_case )
] , dim=0 , ).reshape(_snake_case , _snake_case )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(_snake_case )] , dim=1 )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_snake_case )] , dim=0 )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_snake_case )] , dim=1 )
UpperCAmelCase = torch.cat(
[loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_snake_case )] , dim=0 )
UpperCAmelCase = inv_freq
for k, v in state_dict.items():
UpperCAmelCase = filename
param_count += v.numel()
torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) )
UpperCAmelCase = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
UpperCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
UpperCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_snake_case )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_snake_case )] , dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase = filename
param_count += v.numel()
torch.save(_snake_case , os.path.join(_snake_case , _snake_case ) )
# Write configs
UpperCAmelCase = {"""total_size""": param_count * 2}
write_json(_snake_case , os.path.join(_snake_case , """pytorch_model.bin.index.json""" ) )
UpperCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
UpperCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
UpperCAmelCase = LlamaConfig(
hidden_size=_snake_case , intermediate_size=compute_intermediate_size(_snake_case , _snake_case , _snake_case ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=_snake_case , )
config.save_pretrained(_snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
UpperCAmelCase = LlamaForCausalLM.from_pretrained(_snake_case , torch_dtype=torch.floataa , low_cpu_mem_usage=_snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_snake_case , safe_serialization=_snake_case )
shutil.rmtree(_snake_case )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    """Saves a LLaMA tokenizer converted from `input_tokenizer_path` to `tokenizer_path`."""
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders"
    )
    parser.add_argument(
        "--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"]
    )
    parser.add_argument("--output_dir", help="Location to write HF model and tokenizer")
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
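# --- Hedged usage sketch (not part of the original file; the script name and
# paths are illustrative) ---
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf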
| 234
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
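# --- Hedged usage sketch (not part of the original file) ---
# config = LxmertConfig()
# assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}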
| 59
|
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: TrieNode, word: str) -> None:
    """Prints every word stored below `node`, prefixed with `word`."""
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 232
| 0
|
"""simple docstring"""
def is_palindrome(n: int) -> bool:
    """Returns True if `n` reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Returns `n` plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Counts the Lychrel candidates below `limit` (Project Euler 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
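# --- Worked example (not part of the original file) ---
# 349 becomes a palindrome in three reverse-and-add iterations, so it is not
# a Lychrel candidate:
#   349 + 943 = 1292
#   1292 + 2921 = 4213
#   4213 + 3124 = 7337  (palindrome)
assert sum_reverse(349) == 1292
assert is_palindrome(sum_reverse(sum_reverse(sum_reverse(349))))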
if __name__ == "__main__":
print(f'''{solution() = }''')
| 366
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used by `transformers-cli convert` to build a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]')
| 95
| 0
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__snake_case : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
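# --- Hedged usage sketch (not part of the original file; the checkpoint name
# is illustrative and the scores are made up) ---
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"], hypothesis_template="This is a photo of {}.")
# -> [{"score": 0.99, "label": "cat"}, {"score": 0.01, "label": "dog"}]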
| 269
|
'''simple docstring'''
import functools
def mincost_tickets(days, costs) -> int:
    """Minimum cost to travel on the given `days` using 1-, 7-, and 30-day passes priced by `costs`."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
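# --- Hedged usage sketch (not part of the original file) ---
# With days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15]: a 1-day pass for
# day 1, a 7-day pass covering days 4-8, and a 1-day pass for day 20 give a
# total of 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11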
if __name__ == "__main__":
import doctest
doctest.testmod()
| 161
| 0
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k):
    """Applies the PATTERNS rewrites above to a TF variable name `k`."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
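# --- Hedged illustration (not part of the original file) ---
# Applying the PATTERNS rewrites in order:
#   rename_state_dict_key("encoder/ffn.dense_1.kernel")
#   -> "encoder.fc2.weight"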
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 362
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 197
| 0
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
A: Tuple = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extracts the targeted warnings from a single downloaded artifact (a .zip file or a directory)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )
    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extracts the targeted warnings from all artifact files under `artifact_dir`."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 8_0)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 109
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : List[Any] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
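# --- Hedged usage sketch (not part of the original file) ---
# config = BeitConfig(use_relative_position_bias=True)
# onnx_config = BeitOnnxConfig(config)
# assert "pixel_values" in onnx_config.inputs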
| 284
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__lowerCAmelCase : Tuple =1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__lowerCAmelCase : Optional[Any] =[network.get_input(i) for i in range(network.num_inputs)]
__lowerCAmelCase : Any =[_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__lowerCAmelCase : Optional[Any] =1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__lowerCAmelCase : int =builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__lowerCAmelCase : Dict =builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs to device
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
__lowerCAmelCase : str =raw_datasets["validation"]
# Validation Feature Creation
__lowerCAmelCase : Union[str, Any] =eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__lowerCAmelCase : List[Any] =default_data_collator
__lowerCAmelCase : List[Any] =eval_dataset.remove_columns(["example_id", "offset_mapping"])
__lowerCAmelCase : List[str] =DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 123
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds a root of `function` on the interval [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision of 10^-7 is reached
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
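
# Worked example (a sketch, not part of the original file): for f(x) = x**3 - 2*x - 5
# the sign change on [1, 1000] brackets the real root near 2.0945515, so
# bisection(f, 1, 1000) converges to approximately that value within the 1e-7 tolerance.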
| 329
|
from collections.abc import Generator
def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while True:
_UpperCAmelCase , _UpperCAmelCase = b, a + b
yield b
def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = fibonacci_generator()
while len(str(next(a__ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
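
# Worked example (illustrative): the first Fibonacci number with 3 digits is
# 144 = F(12), so solution(3) returns 12.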
| 329
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
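
# A minimal usage sketch (illustrative only): constructing the deprecated class
# forwards to YolosImageProcessor and emits a FutureWarning.
#
#   feature_extractor = YolosFeatureExtractor()  # warns, then behaves like YolosImageProcessor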
| 298
|
"""simple docstring"""
import numpy as np
import datasets
_lowerCAmelCase : Optional[int] = """
Compute the Mahalanobis Distance
The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_lowerCAmelCase : Tuple = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_lowerCAmelCase : Optional[int] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 298
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Builds and measures an entangled state on the given number of qubits."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
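
# Worked note (illustrative): for this GHZ-style entangled state the only observed
# bitstrings are all-zeros and all-ones, so quantum_entanglement(3) returns counts
# close to {'000': ~500, '111': ~500} out of 1000 shots.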
| 285
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetBottleNeckLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetStage(nn.Module):
    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )


class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
_UpperCAmelCase : Tuple = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : Optional[int] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
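
# A minimal usage sketch (illustrative only; assumes the standard `from_pretrained`
# API and the "microsoft/resnet-50" checkpoint referenced in the docstring constants):
if __name__ == "__main__":
    model = ResNetModel.from_pretrained("microsoft/resnet-50")
    pixel_values = torch.randn(1, 3, 224, 224)  # dummy batch with one RGB image
    outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # expected: torch.Size([1, 2048, 7, 7])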
| 285
| 1
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> int:
lowerCamelCase__ : int = UniSpeechSatForSequenceClassification.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase )
lowerCamelCase__ : Any = downstream_dict['projector.weight']
lowerCamelCase__ : Tuple = downstream_dict['projector.bias']
lowerCamelCase__ : str = downstream_dict['model.post_net.linear.weight']
lowerCamelCase__ : Dict = downstream_dict['model.post_net.linear.bias']
return model
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase )
lowerCamelCase__ : List[Any] = downstream_dict['model.linear.weight']
lowerCamelCase__ : Optional[int] = downstream_dict['model.linear.bias']
return model
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
lowerCamelCase__ : Any = UniSpeechSatForXVector.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase )
lowerCamelCase__ : List[Any] = downstream_dict['connector.weight']
lowerCamelCase__ : Optional[Any] = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowerCamelCase__ : Optional[Any] = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
lowerCamelCase__ : Dict = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
lowerCamelCase__ : List[str] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
lowerCamelCase__ : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
lowerCamelCase__ : str = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
lowerCamelCase__ : int = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
lowerCamelCase__ : List[str] = downstream_dict['objective.W']
return model
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str:
lowerCamelCase__ : Any = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : Union[str, Any] = checkpoint['Downstream']
lowerCamelCase__ : int = UniSpeechSatConfig.from_pretrained(_UpperCAmelCase )
lowerCamelCase__ : Dict = WavaVecaFeatureExtractor.from_pretrained(
_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , do_normalize=_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
lowerCamelCase__ : Optional[int] = convert_classification(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
elif arch.endswith('ForAudioFrameClassification' ):
lowerCamelCase__ : Any = convert_diarization(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
elif arch.endswith('ForXVector' ):
lowerCamelCase__ : Union[str, Any] = convert_xvector(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
lowerCamelCase__ : Tuple = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(_UpperCAmelCase )
hf_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_UpperCAmelCase : List[str] = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
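
# A hypothetical invocation (all paths below are illustrative placeholders):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.ckpt \
#     --model_dump_path ./converted_model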
| 45
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 45
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
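
# A minimal usage sketch (illustrative only): instantiating the config defined
# above with its defaults.
if __name__ == "__main__":
    config = Data2VecTextConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # data2vec-text 768 12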
| 45
|
"""simple docstring"""
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Remove all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
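
# Worked examples (illustrative): all three variants agree, e.g.
#   is_pangram()                       # True  (the default sentence is a pangram)
#   is_pangram_fastest("Hello world")  # False (most letters are missing)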
| 45
| 1
|
"""simple docstring"""
from collections.abc import Callable
class Heap:
    """A generic heap; pass a key function to control the ordering."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns index of a valid parent as per desired ordering among given index and both its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in upward direction of given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in downward direction of given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item value in heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value in heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns top item [item, value] from heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns top item [item, value] from heap and removes it as well, if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Placeholder for heap doctests."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
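
# A minimal usage sketch (illustrative; with the default key the heap above
# orders items by value, largest on top):
#
#   h = Heap()
#   h.insert_item(5, 34)
#   h.insert_item(6, 31)
#   h.insert_item(7, 37)
#   h.get_top()      # -> [7, 37]
#   h.extract_top()  # -> [7, 37]
#   h.get_top()      # -> [5, 34]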
| 202
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_lowerCAmelCase : List[str] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_UpperCAmelCase : str = k.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return k
def __snake_case ( SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict ) -> PegasusForConditionalGeneration:
'''simple docstring'''
_UpperCAmelCase : List[Any] = DEFAULTS.copy()
cfg_kwargs.update(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusConfig(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = PegasusForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[int] = torch_model.model.state_dict()
_UpperCAmelCase : Union[str, Any] = {}
for k, v in tf_weights.items():
_UpperCAmelCase : Union[str, Any] = rename_state_dict_key(SCREAMING_SNAKE_CASE__ )
if new_k not in sd:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if "dense" in k or "proj" in new_k:
_UpperCAmelCase : Any = v.T
_UpperCAmelCase : str = torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
# make sure embedding.padding_idx is respected
_UpperCAmelCase : Tuple = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1] )
_UpperCAmelCase : Any = mapping["shared.weight"]
_UpperCAmelCase : Dict = mapping["shared.weight"]
_UpperCAmelCase : Dict = {k: torch.zeros_like(SCREAMING_SNAKE_CASE__ ) for k, v in sd.items() if k.endswith("bias" ) and k not in mapping}
mapping.update(**SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = torch_model.model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : int = [
k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
def __snake_case ( SCREAMING_SNAKE_CASE__ : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Union[str, Any] = {}
_UpperCAmelCase : Optional[Any] = ["Adafactor", "global_step"]
for name, shape in tqdm(SCREAMING_SNAKE_CASE__ , desc="converting tf checkpoint to dict" ):
_UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCAmelCase : int = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Dict = array
return tf_weights
def __snake_case ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = Path(SCREAMING_SNAKE_CASE__ ).parent.name
_UpperCAmelCase : Tuple = task_specific_params[f'summarization_{dataset}']["max_position_embeddings"]
_UpperCAmelCase : Dict = PegasusTokenizer.from_pretrained("sshleifer/pegasus" , model_max_length=SCREAMING_SNAKE_CASE__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(SCREAMING_SNAKE_CASE__ )
# convert model
_UpperCAmelCase : Union[str, Any] = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = task_specific_params[f'summarization_{dataset}']
if dataset == "large":
_UpperCAmelCase : Optional[int] = task_specific_params
_UpperCAmelCase : str = convert_pegasus(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
_UpperCAmelCase : Optional[Any] = torch_model.state_dict()
sd.pop("model.decoder.embed_positions.weight" )
sd.pop("model.encoder.embed_positions.weight" )
torch.save(SCREAMING_SNAKE_CASE__ , Path(SCREAMING_SNAKE_CASE__ ) / "pytorch_model.bin" )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
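
# A hypothetical invocation (the checkpoint path mirrors the default used in
# get_tf_weights_as_numpy; the save directory is illustrative):
#
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc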
| 202
| 1
|
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = """Input must be a string of 8 numbers plus letter"""
SCREAMING_SNAKE_CASE : List[Any] = """TRWAGMYFPDXBNJZSQVHLCKE"""
def lowercase ( _snake_case : str ) ->bool:
"""simple docstring"""
if not isinstance(_snake_case , _snake_case ):
__snake_case : Optional[int] = f"""Expected string as input, found {type(_snake_case ).__name__}"""
raise TypeError(_snake_case )
__snake_case : int = spanish_id.replace('''-''' , '''''' ).upper()
if len(_snake_case ) != 9:
raise ValueError(_snake_case )
try:
__snake_case : List[Any] = int(spanish_id_clean[0:8] )
__snake_case : Any = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_snake_case ) from ex
if letter.isdigit():
raise ValueError(_snake_case )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
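
# Worked example (illustrative): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
# so is_spain_national_id("12345678Z") returns True.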
| 102
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> bool:
return str(UpperCAmelCase__ ) == str(UpperCAmelCase__ )[::-1]
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int:
return int(UpperCAmelCase__ ) + int(str(UpperCAmelCase__ )[::-1] )
def UpperCAmelCase__ ( UpperCAmelCase__ = 1_00_00 ) -> int:
A_ = []
for num in range(1, UpperCAmelCase__ ):
A_ = 0
A_ = num
while iterations < 50:
A_ = sum_reverse(UpperCAmelCase__ )
iterations += 1
if is_palindrome(UpperCAmelCase__ ):
break
else:
lychrel_nums.append(UpperCAmelCase__ )
return len(UpperCAmelCase__ )
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
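# Note: the integration check above compares a 3x3 slice of the first hidden state
# against reference values; it needs network access to download "microsoft/deberta-base",
# hence the @slow marker.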
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
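# Shape sketch (illustrative sizes, not taken from any particular config): with
# clip_embeddings_dim=768, cross_attention_dim=1280 and clip_extra_context_tokens=4,
# an image embedding of shape (batch, 768) is projected to (batch, 4*1280), reshaped
# to (batch, 1280, 4) and permuted to (batch, 4, 1280), then concatenated in front of
# the projected text encoder hidden states along the sequence dimension (dim=1).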
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            # num is truncatable only if every left and right truncation is prime
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
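# The eleven truncatable primes are 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797
# and 739397; their sum, the published Project Euler 37 answer, is 748317.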
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
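# Example invocation (default checkpoint URL shown above; the dump folder is hypothetical):
#   python convert_dpt_to_pytorch.py --pytorch_dump_folder_path ./dpt-large --model_name dpt-large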
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
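# Design note: the ".lock" file (via filelock.FileLock) serializes concurrent downloads
# of the same URL across processes, while the ".incomplete" file lets resume_download
# pick up a partial transfer with an HTTP Range request instead of starting over.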
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def A ( _lowerCamelCase , _lowerCamelCase="," ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as f:
_lowerCAmelCase : str = eval(f.read() )
else:
_lowerCAmelCase : List[str] = requests.get(_lowerCamelCase )
try:
_lowerCAmelCase : str = requests.json()
except Exception:
_lowerCAmelCase : int = req.content.decode()
assert data is not None, "could not connect"
try:
_lowerCAmelCase : Dict = eval(_lowerCamelCase )
except Exception:
_lowerCAmelCase : Any = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        v = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
        new[k] = v
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def A ( _lowerCamelCase , _lowerCamelCase="RGB" ):
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
_lowerCAmelCase : str = cva.imread(_lowerCamelCase )
else:
_lowerCAmelCase : Dict = get_image_from_url(_lowerCamelCase )
assert img is not None, F"could not connect to: {im}"
_lowerCAmelCase : Optional[int] = cva.cvtColor(_lowerCamelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_lowerCAmelCase : Union[str, Any] = img[:, :, ::-1]
return img
def A ( _lowerCamelCase , _lowerCamelCase=1 ):
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ))
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar) and the speed of light C
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve the Casimir equation for the missing one of force, area or distance;
    exactly one of the three arguments must be 0.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
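# Example (illustrative values): casimir_force(force=0, area=4.0, distance=0.05)
# solves for the attractive force between two 4 m^2 plates separated by 5 cm.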
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image
        processor, assuming do_resize is True with a shortest_edge-style size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self ) -> List[Any]:
        '''simple docstring'''
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            target = json.loads(f.read() )
        target = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
        masks_path = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
        # encode them
        image_processing = YolosImageProcessor(format="""coco_panoptic""" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="""pt""" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["""pixel_values"""].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , expected_area ) )
        # verify boxes
        expected_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , expected_shape )
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , expected_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , expected_size ) )
| 252
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
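# Hedged usage sketch (not part of the script itself): with `accelerate` installed,
# this example is typically run via the launcher, e.g.:
#   accelerate config                                   # one-time interactive setup
#   accelerate launch this_script.py --with_tracking    # flags are defined in main() below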
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config , args ):
    """simple docstring"""
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
        config['''num_epochs'''] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split('''.''' )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    '''accuracy''': eval_metric['''accuracy'''],
                    '''f1''': eval_metric['''f1'''],
                    '''train_loss''': total_loss.item() / len(train_dataloader ),
                    '''epoch''': epoch,
                } , step=epoch , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
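    # Minimal standalone sketch of the tracking calls used above (values are made up):
    #   accelerator = Accelerator(log_with="all", project_dir="logs")
    #   accelerator.init_trackers("my_run", config={"lr": 2e-5})
    #   accelerator.log({"train_loss": 0.5}, step=0)
    #   accelerator.end_training()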
def main() -> Optional[int]:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    parser.add_argument(
        '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
    parser.add_argument(
        '''--project_dir''' , type=str , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevant project information''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 365
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    '''simple docstring'''
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    '''simple docstring'''
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset ):
        '''simple docstring'''
        features: List[InputFeatures]
        def __init__(self , data_dir , tokenizer , task , max_seq_length = None , overwrite_cache=False , evaluate = False , ):
            '''simple docstring'''
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , '''cached_{}_{}_{}_{}'''.format(
                    '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""" )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""" )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info('''Training examples: %s''' , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info('''Saving features into cached file %s''' , cached_features_file )
                    torch.save(self.features , cached_features_file )
        def __len__(self ):
            '''simple docstring'''
            return len(self.features )
        def __getitem__(self , i ):
            '''simple docstring'''
            return self.features[i]
        def get_labels(self ):
            '''simple docstring'''
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        '''simple docstring'''
        features: List[InputFeatures]
        def __init__(self , data_dir , tokenizer , task , max_seq_length = 1_28 , overwrite_cache=False , evaluate = False , ):
            '''simple docstring'''
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_00_00 == 0:
                        logger.info('''Writing example %d of %d''' % (ex_index, len(examples )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        '''example_id''': tf.int32,
                        '''input_ids''': tf.int32,
                        '''attention_mask''': tf.int32,
                        '''token_type_ids''': tf.int32,
                    },
                    tf.int64,
                ) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
        def get_dataset(self ):
            '''simple docstring'''
            return self.dataset
        def __len__(self ):
            '''simple docstring'''
            return len(self.features )
        def __getitem__(self , i ):
            '''simple docstring'''
            return self.features[i]
        def get_labels(self ):
            '''simple docstring'''
            return self.label_list
class HansProcessor(DataProcessor ):
    '''simple docstring'''
    def get_train_examples(self , data_dir ):
        '''simple docstring'''
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_train_set.txt''' ) ) , '''train''' )
    def get_dev_examples(self , data_dir ):
        '''simple docstring'''
        return self._create_examples(self._read_tsv(os.path.join(data_dir , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
    def get_labels(self ):
        '''simple docstring'''
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self , lines , set_type ):
        '''simple docstring'''
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '''%s-%s''' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features(examples: List[InputExample] , label_list: List[str] , max_length: int , tokenizer: PreTrainedTokenizer , ) -> List[InputFeatures]:
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='''convert examples to features''' ):
        if ex_index % 10_000 == 0:
            logger.info('''Writing example %d''' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='''max_length''' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info('''*** Example ***''' )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
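# Hedged usage sketch (paths and model name are assumptions, not part of this module):
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
#   input_ids = dataset[0].input_ids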
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
| 24
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a: Optional[int] = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
__a: List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 198
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
__a: Tuple = logging.get_logger(__name__)
__a: Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
__a: Optional[Any] = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
__a: Tuple = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class T5TokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , FutureWarning , )
return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
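    # Hedged sanity sketch of the two methods above (token ids are made up):
    #   build_inputs_with_special_tokens([10, 11])        -> [10, 11, eos_id]
    #   build_inputs_with_special_tokens([10, 11], [12])  -> [10, 11, eos_id, 12, eos_id]
    #   create_token_type_ids_from_sequences([10, 11])    -> [0, 0, 0]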
    def get_sentinel_tokens( self ) -> List[str]:
        return list(
            set(filter(lambda token: bool(re.search(r'''<extra_id_\d+>''' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ) -> List[int]:
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
| 198
| 1
|
'''simple docstring'''
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path(graph: dict , start , goal ) -> list[str]:
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict , start , target ) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
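# Both traversals above are O(V + E): each node enters the queue at most once and
# each edge is inspected at most once.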
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 89
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(f'1 / {pow(temp + 1 , int(power ) )}' if series else """1""" )
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
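# Worked example: p_series(3, 2) -> ['1', '1 / 4', '1 / 9'], i.e. 1 + 1/2^2 + 1/3^2.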
| 89
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : int = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig ):
    model_type = """gptj"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=5_0400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class GPTJOnnxConfig(OnnxConfigWithPast ):
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = super(GPTJOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
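    # Hedged shape check with the defaults above: n_head=16 and n_embd=4096 give
    # head_dim = 4096 // 16 = 256, so each dummy past key/value tensor is
    # (batch, 16, seqlen + 2, 256).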
    @property
    def default_onnx_opset( self ) -> int:
        return 13
| 105
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor ):
    model_input_names = ['''input_features''']
    def __init__( self , feature_size=80 , sampling_rate=16_000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
    def _np_extract_fbank_features( self , waveform: np.array ) -> np.ndarray:
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray] , attention_mask: List[np.ndarray] , padding_value: float = 0.0 ) -> List[np.ndarray]:
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: bool = True , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , padding: Optional[str] = "max_length" , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , do_normalize: Optional[bool] = None , **kwargs , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['''input_features'''] = self.zero_mean_unit_var_norm(
                padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
            padded_inputs['''input_features'''] = np.stack(padded_inputs['''input_features'''] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs['''input_features'''] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['''input_features'''] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['''attention_mask'''] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
    def to_dict( self ) -> Dict[str, Any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''feature_extractor_type'''] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
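# Hedged usage sketch (assumes a 16 kHz mono float waveform `audio`):
#   fe = WhisperFeatureExtractor()
#   feats = fe(audio, sampling_rate=16_000, return_tensors="np").input_features
#   # feats has shape (batch, feature_size, nb_max_frames) == (1, 80, 3000) by default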
| 308
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A_ : List[str] =logging.get_logger(__name__)
A_ : Optional[Any] ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A_ : Tuple ={
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
A_ : Any ={
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
A_ : List[str] ={
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 80
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A_ : List[str] =logging.get_logger(__name__)
A_ : Optional[Any] ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A_ : Tuple ={
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
A_ : Any ={
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
A_ : List[str] ={
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 80
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self ):
        self.connections = {}
    def add_node(self , node ):
        self.connections[node] = {}
    def add_transition_probability(self , node1 , node2 , probability ):
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes(self ):
        return list(self.connections )
    def transition(self , node ):
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start , transitions , steps ) -> dict[str, int]:
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
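# Hedged usage sketch (transition probabilities are made up and should sum to 1 per node):
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 5_000))  # Counter of visits per node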
if __name__ == "__main__":
import doctest
doctest.testmod()
| 295
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    '''simple docstring'''
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
while row < size and col < size:
# pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    size: int = len(y_points )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    '''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    '''simple docstring'''
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
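# Worked intuition for solution(): for each truncated fit, step x_val forward until the
# interpolating polynomial first disagrees with func, then add that first incorrect term.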
if __name__ == "__main__":
print(f"""{solution() = }""")
| 339
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase: str = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: str = ["CLIPFeatureExtractor"]
__lowercase: str = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: int = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[Any] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowercase: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=24, num_mel_bins=24, padding_value=0.0, sampling_rate=1_6000, return_attention_mask=True, do_normalize=True, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self, equal_length=False, numpify=False ):
        """simple docstring"""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
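# An end-to-end sketch of the extractor under test (illustrative only; assumes
# the speech extras of `transformers` are installed, and uses the non-default
# feature_size/num_mel_bins values that the tester above uses):
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor(feature_size=24, num_mel_bins=24, sampling_rate=16000)
    waveform = np.asarray(floats_list((1, 16000))[0])  # ~1 second of fake audio
    features = extractor(waveform, sampling_rate=16000, return_tensors="np").input_features
    print(features.shape)  # (1, num_frames, 24)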
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
lowercase_ = "."
if __name__ == "__main__":
lowercase_ = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
lowercase_ = []
lowercase_ = []
with open(doctest_file_path) as fp:
for line in fp:
lowercase_ = line.strip()
lowercase_ = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
lowercase_ = "\n".join(non_existent_paths)
raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
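# A small usage sketch of `get_activation` outside the test class (illustrative only):
if __name__ == "__main__":
    act = get_activation("gelu")
    print(act(torch.linspace(-3, 3, 7)))  # large negatives -> ~0, large positives pass through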
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
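# A quick illustration of `trim_batch` (not part of the original file): columns
# that contain only the pad id across the whole batch are dropped.
if __name__ == "__main__":
    batch = torch.tensor([[5, 8, 0, 0], [7, 0, 0, 0]])  # pad_token_id = 0
    print(trim_batch(batch, pad_token_id=0))  # tensor([[5, 8], [7, 0]])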
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info() -> dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
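# A worked example of the token-level F1 above (illustrative): with prediction
# "the cat sat" and reference "a cat sat down", normalization drops the articles,
# the overlap is {"cat", "sat"}, so precision = 2/2, recall = 2/3 and F1 = 0.8.
if __name__ == "__main__":
    print(round(f1_score("the cat sat", "a cat sat down"), 3))  # 0.8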
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
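# A small sketch of how `set_extra_model_params` reroutes hyperparameters
# (illustrative; the Namespace objects are stand-ins for real hparams/config):
if __name__ == "__main__":
    from argparse import Namespace

    hparams = Namespace(dropout=0.1)
    config = Namespace(dropout_rate=0.0)  # T5-style config: no `dropout` attribute
    hparams, config = set_extra_model_params(["dropout"], hparams, config)
    print(config.dropout_rate, hasattr(hparams, "dropout"))  # 0.1 False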
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # so far all we check is that the run completed without failure
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        fp16: bool = True,
        quality_checks: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs" )
def __magic_name__ ( self ):
pass
@unittest.skip
def __magic_name__ ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def __magic_name__ ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold only has one output format." )
def __magic_name__ ( self ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def __magic_name__ ( self ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def __magic_name__ ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def heaps(arr: list) -> list:
    """
    Pure python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
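# A quick check of the permutation order produced above (not part of the original
# script): each tuple differs from its predecessor by exactly one swap.
if __name__ == "__main__":
    assert heaps([1, 2, 3]) == [
        (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1),
    ]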
import importlib
import inspect
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
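# A quick illustration of what `_re_checkpoint` extracts (not part of the original
# script): markdown-style checkpoint links become (name, url) pairs.
if __name__ == "__main__":
    doc = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
    print(_re_checkpoint.findall(doc))  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]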
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
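# Typical invocations of this example (for reference; see the accelerate examples
# README for the full matrix of supported configurations):
#
#   python this_script.py --with_tracking
#   accelerate launch this_script.py --mixed_precision fp16 --with_tracking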
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
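# A minimal sketch of resolving a backend (illustrative; requires at least one of
# optuna / ray[tune] / sigopt / wandb to be installed):
if __name__ == "__main__":
    name = default_hp_search_backend()
    backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
    backend.ensure_available()
    print(f"Using hyperparameter search backend: {backend.name}")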
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
'''Pregnancy''': 168629,
'''Christianity''': 7675,
'''Explain''': 106423,
'''Fitness''': 63440,
'''Saving''': 63163,
'''Ask''': 27171,
'''Ass''': 95985,
'''Joke''': 163509,
'''Questions''': 45622,
'''Thoughts''': 49605,
'''Retail''': 52342,
'''Feminism''': 164338,
'''Writing''': 11992,
'''Atheism''': 192263,
'''Netflix''': 48616,
'''Computing''': 39639,
'''Opinion''': 43213,
'''Alone''': 44967,
'''Funny''': 58917,
'''Gaming''': 40358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 77138,
'''Diet''': 36206,
'''Legal''': 11859,
'''Norman''': 4939,
'''Tip''': 72689,
'''Weight''': 52343,
'''Movies''': 46273,
'''Running''': 23425,
'''Science''': 2090,
'''Horror''': 37793,
'''Confession''': 60572,
'''Finance''': 12250,
'''Politics''': 16360,
'''Scary''': 191985,
'''Support''': 12654,
'''Technologies''': 32516,
'''Teenage''': 66160,
'''Event''': 32769,
'''Learned''': 67460,
'''Notion''': 182770,
'''Wikipedia''': 37583,
'''Books''': 6665,
'''Extract''': 76050,
'''Confessions''': 102701,
'''Conspiracy''': 75932,
'''Links''': 63674,
'''Narcissus''': 150425,
'''Relationship''': 54766,
'''Relationships''': 134796,
'''Reviews''': 41671,
'''News''': 4256,
'''Translation''': 26820,
'''multilingual''': 128406,
}
def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as a tuple of symbols
    (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
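# A quick illustration of `get_pairs` (not part of the original file): the set of
# adjacent symbol pairs is what drives BPE merge selection in `bpe` below.
if __name__ == "__main__":
    print(get_pairs(("l", "o", "w")))  # {('l', 'o'), ('o', 'w')}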
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
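# A short usage sketch (illustrative; the environment variable names below are
# placeholders, not ones the library actually reads):
if __name__ == "__main__":
    os.environ["MY_NUM_PROCS"] = "4"
    print(get_int_from_env(["MY_NUM_PROCS", "NUM_PROCS"], default=1))  # 4
    print(parse_flag_from_env("MY_DEBUG", default=False))  # False unless MY_DEBUG is truthy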
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 52
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def download_prompt( prompt_or_repo_id : str , agent_name : str , mode : str = "run" ) -> str:
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(prompt_file , "r" , encoding="utf-8" ) as f:
        return f.read()
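# Hedged usage sketch: the agent name is illustrative, and fetching the default
# template requires network access to the dataset repo named above.
if __name__ == "__main__":
    # A string containing whitespace is treated as a literal prompt and returned as-is:
    assert download_prompt("Answer: <<task>>" , "my-agent" ) == "Answer: <<task>>"
    # download_prompt(None, agent_name="my-agent", mode="run")  # would fetch run_prompt_template.txt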
| 300
| 0
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class BlipaVisionConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''blip_2_vision_model'''

    def __init__( self , hidden_size=1_408 , intermediate_size=6_144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''blip_2_qformer'''

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            config_dict = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''blip-2'''
    is_composition = True

    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config: BlipaVisionConfig , qformer_config: BlipaQFormerConfig , text_config: PretrainedConfig , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
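# Minimal usage sketch: build the composite config from default sub-configs and
# round-trip it through to_dict (the values checked are the library defaults above).
if __name__ == "__main__":
    config = BlipaConfig()
    assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size
    assert config.to_dict()["model_type"] == "blip-2"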
| 271
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.0_0_2,
'''sigma_max''': 8_0.0,
}
def strabool( v ) -> bool:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
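# Usage sketch with argparse; the flag name below is invented for illustration:
# demo_parser = argparse.ArgumentParser()
# demo_parser.add_argument("--use_ema", type=strabool, default=False)
# demo_parser.parse_args(["--use_ema", "yes"]).use_ema  # -> True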
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
    '''simple docstring'''
    new_checkpoint[F'''{new_prefix}.norm1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv1.weight'''] = checkpoint[F'''{old_prefix}.in_layers.2.weight''']
    new_checkpoint[F'''{new_prefix}.conv1.bias'''] = checkpoint[F'''{old_prefix}.in_layers.2.bias''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.weight'''] = checkpoint[F'''{old_prefix}.emb_layers.1.weight''']
    new_checkpoint[F'''{new_prefix}.time_emb_proj.bias'''] = checkpoint[F'''{old_prefix}.emb_layers.1.bias''']
    new_checkpoint[F'''{new_prefix}.norm2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.0.weight''']
    new_checkpoint[F'''{new_prefix}.norm2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.0.bias''']
    new_checkpoint[F'''{new_prefix}.conv2.weight'''] = checkpoint[F'''{old_prefix}.out_layers.3.weight''']
    new_checkpoint[F'''{new_prefix}.conv2.bias'''] = checkpoint[F'''{old_prefix}.out_layers.3.bias''']
    if has_skip:
        new_checkpoint[F'''{new_prefix}.conv_shortcut.weight'''] = checkpoint[F'''{old_prefix}.skip_connection.weight''']
        new_checkpoint[F'''{new_prefix}.conv_shortcut.bias'''] = checkpoint[F'''{old_prefix}.skip_connection.bias''']
    return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim=None ):
    '''simple docstring'''
    weight_q , weight_k , weight_v = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
    new_checkpoint[F'''{new_prefix}.group_norm.weight'''] = checkpoint[F'''{old_prefix}.norm.weight''']
    new_checkpoint[F'''{new_prefix}.group_norm.bias'''] = checkpoint[F'''{old_prefix}.norm.bias''']
    new_checkpoint[F'''{new_prefix}.to_q.weight'''] = weight_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_q.bias'''] = bias_q.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.weight'''] = weight_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_k.bias'''] = bias_k.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.weight'''] = weight_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_v.bias'''] = bias_v.squeeze(-1 ).squeeze(-1 )
    new_checkpoint[F'''{new_prefix}.to_out.0.weight'''] = (
        checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
    )
    new_checkpoint[F'''{new_prefix}.to_out.0.bias'''] = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
    return new_checkpoint
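# Standalone sketch of the QKV split above: torch.chunk cuts the stacked
# 1x1-conv projection into query/key/value, and the squeezes drop the spatial
# dims so the weights become linear-layer shaped (toy sizes):
# qkv = torch.randn(3 * 8, 8, 1, 1)
# q, k, v = qkv.chunk(3, dim=0)      # each (8, 8, 1, 1)
# q2d = q.squeeze(-1).squeeze(-1)    # (8, 8)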
def con_pt_to_diffuser( checkpoint_path , unet_config ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint['class_embedding.weight'] = checkpoint['label_emb.weight']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    down_block_types = unet_config['down_block_types']
    layers_per_block = unet_config['layers_per_block']
    attention_head_dim = unet_config['attention_head_dim']
    channels_list = unet_config['block_out_channels']
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types ):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block ):
                new_prefix = F'''down_blocks.{i}.resnets.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.0'''
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=has_skip )
                new_prefix = F'''down_blocks.{i}.attentions.{j}'''
                old_prefix = F'''input_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
        if i != len(down_block_types ) - 1:
            new_prefix = F'''down_blocks.{i}.downsamplers.0'''
            old_prefix = F'''input_blocks.{current_layer}.0'''
            new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = 'mid_block.resnets.0'
    old_prefix = 'middle_block.0'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_prefix = 'mid_block.attentions.0'
    old_prefix = 'middle_block.1'
    new_checkpoint = convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
    new_prefix = 'mid_block.resnets.1'
    old_prefix = 'middle_block.2'
    new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    current_layer = 0
    up_block_types = unet_config['up_block_types']
    for i, layer_type in enumerate(up_block_types ):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.1'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1 ):
                new_prefix = F'''up_blocks.{i}.resnets.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.0'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=True )
                new_prefix = F'''up_blocks.{i}.attentions.{j}'''
                old_prefix = F'''output_blocks.{current_layer}.1'''
                new_checkpoint = convert_attention(
                    checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim )
                current_layer += 1
            if i != len(up_block_types ) - 1:
                new_prefix = F'''up_blocks.{i}.upsamplers.0'''
                old_prefix = F'''output_blocks.{current_layer-1}.2'''
                new_checkpoint = convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix )
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F'''Checkpoint: {ckpt_name}''')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 271
| 1
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
    '''simple docstring'''

    def assertListAlmostEqual( self , list1 , list2 , tol ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    '''simple docstring'''

    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        '''simple docstring'''
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"""failed for {scheduler_func} in save and reload""" )
class LambdaScheduleWrapper:
    '''simple docstring'''

    def __init__( self , fn ):
        '''simple docstring'''
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
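# Minimal usage sketch of the helpers above (toy model; the printed values match
# the linear-warmup row of the expectations table in the test):
# demo_opt = AdamW(nn.Linear(2, 2).parameters(), lr=10.0)
# demo_sched = get_linear_schedule_with_warmup(demo_opt, num_warmup_steps=2, num_training_steps=10)
# unwrap_schedule(demo_sched, num_steps=4)  # -> [0.0, 5.0, 10.0, 8.75]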
| 14
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__(self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
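# Hedged usage sketch. The class name above is reconstructed from the
# CLIP-image-processor + XLM-R tokenizer pairing; the checkpoint id below is an
# assumption and needs network access:
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of two cats"], images=image, return_tensors="pt")
# inputs.keys()  # input_ids, attention_mask, pixel_values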
| 24
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class TextQuestionAnsweringToolTester( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ) -> None:
        '''simple docstring'''
        self.tool = load_tool('text-question-answering' )
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering' , remote=True )

    def test_exact_match_arg( self ) -> None:
        '''simple docstring'''
        result = self.tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )

    def test_exact_match_arg_remote( self ) -> None:
        '''simple docstring'''
        result = self.remote_tool(TEXT , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )

    def test_exact_match_kwarg( self ) -> None:
        '''simple docstring'''
        result = self.tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )

    def test_exact_match_kwarg_remote( self ) -> None:
        '''simple docstring'''
        result = self.remote_tool(text=TEXT , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
| 356
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
    max_seq_length: int = field(
        default=1_024 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        } , )
    train_file: Optional[str] = field(
        default=None , metadata={'help': 'A csv or a json file containing the training data.'} )
    validation_file: Optional[str] = field(
        default=None , metadata={'help': 'A csv or a json file containing the validation data.'} )
    test_file: Optional[str] = field(default=None , metadata={'help': 'A csv or a json file containing the test data.'} )

    def __post_init__( self ) -> None:
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
        else:
            train_extension = self.train_file.split('.' )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split('.' )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    logger.info(f'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split('.' )[-1]
                test_extension = data_args.test_file.split('.' )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files['test'] = data_args.test_file
            else:
                raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )

        for key in data_files.keys():
            logger.info(f'load a local file for {key}: {data_files[key]}' )

        if data_args.train_file.endswith('.csv' ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset('csv' , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset('json' , data_files=data_files , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets['train'].features['label'].names
    num_labels = len(label_list )

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {'Refused': 0, 'Entailed': 1}
    model.config.id2label = {0: 'Refused', 1: 'Entailed'}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
            f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )

    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split('#' ) for _table_row in _table_text.strip('\n' ).split('\n' )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        questions = examples['statement']
        tables = list(map(_convert_table_text_to_pandas , examples['table_text'] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result['label'] = examples['label']
        return result

    with training_args.main_process_first(desc='dataset map pre-processing' ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on dataset' , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError('--do_predict requires a test dataset' )
        predict_dataset = raw_datasets['test']
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )

    if training_args.do_predict:
        logger.info('*** Predict ***' )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns('label' )
        predictions = trainer.predict(predict_dataset , metric_key_prefix='predict' ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predict_results_tabfact.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                logger.info('***** Predict Results *****' )
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f'{index}\t{item}\n' )
    kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )


def _mp_fn(index ):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
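# Toy run of the '#'-delimited table conversion used in
# _convert_table_text_to_pandas above (data invented):
# table_text = "city#population\nparis#2.1m\nlyon#0.5m\n"
# rows = [r.split("#") for r in table_text.strip("\n").split("\n")]
# pd.DataFrame.from_records(rows[1:], columns=rows[0])
#      city population
# 0   paris       2.1m
# 1    lyon       0.5m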
| 308
| 0
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_lowercase : List[str] ="\nimport os\n"
_lowercase : int ="\ndef foo():\n import os\n return False\n"
_lowercase : Any ="\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n"
_lowercase : Optional[int] ="\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n"
_lowercase : List[Any] ="\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n"
_lowercase : Optional[Any] ="\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n"
_lowercase : str ="\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n"
_lowercase : Any ="\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n"
_lowercase : int ="\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n"
_lowercase : Optional[Any] ="\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n"
_lowercase : Any =[
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , _lowercase)
def lowerCAmelCase_ ( _lowercase : str , _lowercase : Any) -> str:
"""simple docstring"""
a__ : Tuple = os.path.join(_lowercase , """test_file.py""")
with open(_lowercase , """w""") as _tmp_file:
_tmp_file.write(_lowercase)
a__ : List[str] = get_imports(_lowercase)
assert parsed_imports == ["os"]
| 170
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] =["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1_024,
    'moussaKam/barthez': 1_024,
    'moussaKam/barthez-orangesum-title': 1_024,
}

SPIECE_UNDERLINE = '▁'
class BarthezTokenizer( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.fairseq_tokens_to_ids = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
        self.fairseq_tokens_to_ids["""<mask>"""] = len(self.sp_model ) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )

    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
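# Hedged usage sketch (checkpoint id taken from the pretrained map above;
# requires sentencepiece and network access):
# tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
# ids = tokenizer("Paris est la capitale de la France.")["input_ids"]
# tokenizer.convert_ids_to_tokens(ids)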
| 81
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps = 20_00 , snr = 0.15 , sigma_min = 0.01 , sigma_max = 1348.0 , sampling_eps = 1e-5 , correct_steps = 1 , ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps )

    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample

    def set_timesteps( self , num_inference_steps , sampling_eps = None , device = None ) -> None:
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device )

    def set_sigmas( self , num_inference_steps , sigma_min = None , sigma_max = None , sampling_eps = None ) -> None:
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps , sampling_eps )
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min ) , math.log(sigma_max ) , num_inference_steps ) )
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def get_adjacent_sigma( self , timesteps , t ):
        return torch.where(
            timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )

    def step_pred( self , model_output , timestep , sample , generator = None , return_dict = True , ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        timestep = timestep * torch.ones(
            sample.shape[0] , device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device )
        sigma = self.discrete_sigmas[timesteps].to(sample.device )
        adjacent_sigma = self.get_adjacent_sigma(timesteps , timestep ).to(sample.device )
        drift = torch.zeros_like(sample )
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            diffusion = diffusion.unsqueeze(-1 )
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape , layout=sample.layout , generator=generator , device=sample.device , dtype=sample.dtype )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample , prev_sample_mean=prev_sample_mean )

    def step_correct( self , model_output , sample , generator = None , return_dict = True , ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape , layout=sample.layout , generator=generator ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            step_size = step_size.unsqueeze(-1 )
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )

    def add_noise( self , original_samples , noise , timesteps , ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device )
        sigmas = self.discrete_sigmas.to(original_samples.device )[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples ) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__( self ) -> int:
        return self.config.num_train_timesteps
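# Minimal predictor-corrector sampling sketch with this scheduler
# (the score network is a stand-in and the shapes are invented):
# scheduler = ScoreSdeVeScheduler()
# scheduler.set_timesteps(10)
# scheduler.set_sigmas(10)
# sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_output = -sample  # stand-in for a learned score model
#     sample = scheduler.step_correct(model_output, sample).prev_sample
#     sample = scheduler.step_pred(model_output, t, sample).prev_sample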
| 81
| 1
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 61
|
def get_set_bits_count( number: int ) -> int:
    """
    Count the number of set bits (1s) in a non-negative integer
    using Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number , int ) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
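# Quick sanity check for the routine above: 0b1101 has three set bits, and the
# loop body runs exactly three times because each `number &= number - 1` clears
# exactly one (the lowest) set bit per iteration.
assert count_set_bits(0b1101) == 3
assert count_set_bits(0) == 0
assert count_set_bits(255) == 8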
| 306
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class ImageGPTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
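# Usage sketch for the config above (class names as de-obfuscated here, so
# treat them as our restoration rather than a verbatim quote of the library):
# ImageGPT is GPT-2-like over 512 pixel color clusters plus one SOS token, and
# the sequence length is the pixel count (32 * 32 for the small checkpoints).
config = ImageGPTConfig()
assert config.vocab_size == 513
assert config.max_position_embeddings == 32 * 32
assert config.hidden_size == config.n_embd  # attribute_map aliases the GPT-2-style names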
| 157
|
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
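# Worked example for the functions above: $1000 at a 0.5% daily rate over 10
# days accrues 1000 * 0.005 * 10 = $50 of simple interest, while compounding a
# 10% rate over 12 periods yields 1000 * (1.1 ** 12 - 1) ~= $2138.43.
assert simple_interest(1000.0, 0.005, 10) == 50.0
print(compound_interest(1000.0, 0.10, 12))  # -> ~2138.43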
| 157
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""")
        q_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""")

        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""")
        gamma_2 = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""")
        state_dict[f"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[f"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2


def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
__SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 31
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """simple docstring"""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """simple docstring"""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
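# Worked example for the helpers above: one mole at 273.15 K confined to
# 0.0224 m^3 gives P = nRT / V ~= 101 kPa, roughly standard atmospheric
# pressure, which is a quick sanity check on the constant and the formula.
print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # -> ~101388 Pa
print(volume_of_gas_system(1.0, 300.0, 101325))     # -> ~0.0246 m^3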
| 31
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
A = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 362
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
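# Standalone illustration (not the class above; all names here are ours) of
# the three-way dispatch implemented in __call__: text-only returns the
# tokenizer encoding, image-only returns the image-processor output, and
# text + image merges pixel_values into the text encoding.
def combine(text_encoding=None, image_features=None):
    if text_encoding is None and image_features is None:
        raise ValueError("You have to specify either text or images.")
    if text_encoding is not None and image_features is not None:
        return {**text_encoding, "pixel_values": image_features["pixel_values"]}
    return text_encoding if text_encoding is not None else image_features


assert combine({"input_ids": [[1, 2]]}, {"pixel_values": [[0.0]]}) == {
    "input_ids": [[1, 2]],
    "pixel_values": [[0.0]],
}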
| 188
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    '''simple docstring'''
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    '''simple docstring'''
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 158
|
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    '''simple docstring'''
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
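# Why stepping by 6 is enough: every prime p > 3 satisfies p % 6 in {1, 5},
# since 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible by 3, so the
# loop above only needs to trial-divide by i and i + 2 for i = 5, 11, 17, ...
assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 101, 997])
assert not is_prime(25)  # composite, caught at i = 5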
class TestIsPrime(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 158
| 1
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 254
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 254
| 1
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
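# Small illustration of the tiling math used by the pipeline below: each
# tile_size x tile_size crop is grown by `tile_border` pixels on every side
# with add_overlap_rect, and clamp_rect keeps the grown rectangle inside the
# image, so neighbouring tiles share a blendable overlap region.
_image_size = (512, 384)
_crop = (128, 128, 256, 256)  # the tile at grid position (1, 1)
assert add_overlap_rect(_crop, 32, _image_size) == (96, 96, 288, 288)
assert add_overlap_rect((0, 0, 128, 128), 32, _image_size) == (0, 0, 160, 160)  # clamped at the image edge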
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    '''simple docstring'''

    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image


def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 201
|
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
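# Usage sketch for word_break above: the inner loop walks the trie from
# position `index`, and functools.cache memoizes is_breakable per suffix, so
# each start index is expanded at most once.
assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False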
| 96
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length=101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 364
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 97
| 0
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    '''simple docstring'''
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    '''simple docstring'''
    with open(path) as f:
        return json.load(f)


def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    '''simple docstring'''
    return list(map(f, x))


def pickle_save(obj, path):
    '''simple docstring'''
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    '''simple docstring'''

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    '''simple docstring'''
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    '''simple docstring'''
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    '''simple docstring'''
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 138
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __A :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[int]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Dict=0 , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : Optional[Any] = is_training
lowerCAmelCase : Optional[Any] = use_input_mask
lowerCAmelCase : Union[str, Any] = use_token_type_ids
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : str = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : List[Any] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : str = type_sequence_label_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : Dict = num_labels
lowerCAmelCase : List[Any] = num_choices
lowerCAmelCase : Optional[Any] = scope
lowerCAmelCase : Optional[Any] = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
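    # Note: DPR reuses the standard BERT encoder configuration; DPRConfig only
    # adds `projection_dim` on top of the BertConfig fields built above.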
    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
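# Minimal usage sketch: the defaults above follow the
# google/realm-cc-news-pretrained-embedder checkpoint, so a variant only needs
# the overridden fields, e.g.
#   config = RealmConfig(num_candidates=4)
#   config.save_pretrained("./my-realm-config")  # hypothetical output path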
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple[int, int]:
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
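# e.g. downscale_height_and_width(512, 512) == (64, 64): each dimension is divided
# by scale_factor**2 (rounded up) and then re-expressed in units of scale_factor.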
class a ( UpperCAmelCase ):
def __init__( self , A_ , A_ , A_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
_UpperCAmelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if latents is None:
_UpperCAmelCase : Any = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_UpperCAmelCase : Optional[int] = latents.to(A_ )
_UpperCAmelCase : Tuple = latents * scheduler.init_noise_sigma
return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
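        # Note: `accelerate.cpu_offload` keeps each submodule's weights on the CPU
        # and moves them to the GPU only for the duration of that module's forward
        # pass, trading throughput for a much lower peak VRAM footprint.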
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _UpperCAmelCase ( self ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self , A_ , A_ , A_ , A_ = 512 , A_ = 512 , A_ = 100 , A_ = 4.0 , A_ = 1 , A_ = None , A_ = None , A_ = "pil" , A_ = True , ):
'''simple docstring'''
_UpperCAmelCase : str = self._execution_device
_UpperCAmelCase : Tuple = guidance_scale > 1.0
if isinstance(A_ , A_ ):
_UpperCAmelCase : Union[str, Any] = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Dict = torch.cat(A_ , dim=0 )
if isinstance(A_ , A_ ):
_UpperCAmelCase : Any = torch.cat(A_ , dim=0 )
_UpperCAmelCase : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : Tuple = hint.repeat_interleave(A_ , dim=0 )
_UpperCAmelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
_UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
_UpperCAmelCase : Dict = self.scheduler.timesteps
_UpperCAmelCase : Union[str, Any] = self.movq.config.latent_channels
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase : Tuple = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase : Optional[Any] = {"image_embeds": image_embeds, "hint": hint}
_UpperCAmelCase : Optional[int] = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase : Tuple = noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = variance_pred.chunk(2 )
_UpperCAmelCase : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
_UpperCAmelCase : Optional[Any] = self.movq.decode(A_ , force_not_quantize=A_ )["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")
if output_type in ["np", "pil"]:
_UpperCAmelCase : Union[str, Any] = image * 0.5 + 0.5
_UpperCAmelCase : Dict = image.clamp(0 , 1 )
_UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale `data` to zero mean and unit sample standard deviation (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
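# Examples: normalization([2, 4, 6])   -> [0.0, 0.5, 1.0]
#           standardization([2, 4, 6]) -> [-1.0, 0.0, 1.0]  (sample stdev is 2)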
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    """simple docstring"""

    feat_extract_tester = None
    feature_extraction_class = None
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a ,'feature_size' ) )
self.assertTrue(hasattr(_a ,'sampling_rate' ) )
self.assertTrue(hasattr(_a ,'padding_value' ) )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_tester.prepare_inputs_for_common()
_a : str = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a ,processed_features[input_name] ) ) )
_a : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Union[str, Any] = BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
_a : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : str = BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
_a : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = feat_extract.model_input_names[0]
_a : int = BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
_a : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowercase ( self : Dict ,_a : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : Tuple ):
_a : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : Optional[Any] ,_a : Union[str, Any] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : int = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Tuple = BatchFeature({input_name: speech_inputs} )
_a : str = self.feat_extract_tester.seq_length_diff
_a : Dict = self.feat_extract_tester.max_seq_length + pad_diff
_a : Dict = self.feat_extract_tester.min_seq_length
_a : Optional[Any] = self.feat_extract_tester.batch_size
_a : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a : int = feat_extract.pad(_a ,padding=_a )
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(_a ,padding='longest' )
_a : Any = input_a[input_name]
_a : Optional[Any] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
_a : List[str] = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
_a : str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' )[input_name]
_a : int = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,return_tensors='np' )
_a : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a : Tuple = feat_extract.pad(_a ,pad_to_multiple_of=10 )
_a : List[str] = input_a[input_name]
_a : str = feat_extract.pad(_a ,padding='longest' ,pad_to_multiple_of=10 )
_a : Tuple = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a )
_a : Any = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a ,return_tensors='np' ,)
_a : Dict = input_a[input_name]
self.assertTrue(all(len(_a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
_a : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_a : Any = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __lowercase ( self : List[Any] ,_a : Optional[int]=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : List[str] ):
_a : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : List[str] ,_a : List[str] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Any = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_a : Union[str, Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=_a )
_a : str = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
_a : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=_a ,)
_a : Any = input_a[input_name]
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
_a : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a ,return_tensors='np' ,)
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a )
_a : Tuple = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
_a : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' ,truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_a : Optional[Any] = 12
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,truncation=_a ,)
_a : Tuple = input_a[input_name]
_a : str = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,)
_a : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_a : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_a : Union[str, Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
self._check_truncation(numpify=_a )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_a )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Optional[int] = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : Dict = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : Any = feat_extract.pad(_a ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.feat_extract_dict
_a : List[Any] = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Tuple = [len(_a ) for x in speech_inputs]
_a : int = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : str = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_dict
_a : Tuple = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = [len(_a ) for x in speech_inputs]
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Any = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = min(_a )
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
def __a ( self :List[Any] , _lowercase :int) -> str:
UpperCAmelCase_ = '''This is a test'''
UpperCAmelCase_ = '''This is a test'''
return input_text, output_text
def __a ( self :Optional[Any]) -> str:
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase) , _lowercase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase) , _lowercase)
def __a ( self :int) -> int:
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(_lowercase) , 2000)
def __a ( self :Union[str, Any]) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 2000)
def __a ( self :Dict) -> str:
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase) , [465, 287, 265, 631, 842])
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
_lowercase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_lowercase)
self.assertListEqual(
_lowercase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_lowercase)
# fmt: off
self.assertListEqual(
_lowercase , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def __a ( self :str) -> Any:
UpperCAmelCase_ = GPTSwaTokenizer(_lowercase)
UpperCAmelCase_ = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
UpperCAmelCase_ = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_lowercase , _lowercase):
self.assertListEqual(tokenizer.encode_fast(_lowercase) , _lowercase)
# Test that decode_fast returns the input text
for text, token_ids in zip(_lowercase , _lowercase):
self.assertEqual(tokenizer.decode_fast(_lowercase) , _lowercase)
@slow
def __a ( self :Optional[Any]) -> Optional[int]:
UpperCAmelCase_ = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=_lowercase , )
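# Usage sketch with the sentencepiece fixture bound to SAMPLE_VOCAB above (the
# token ids are fixture-specific, taken from the full-tokenizer test):
#   tok = GPTSwaTokenizer(SAMPLE_VOCAB)
#   tok.encode_fast("This is a test")  # -> [465, 287, 265, 631, 842]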
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so every multiple of i loses its factor of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
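# e.g. solution(10) == 31: the count of reduced proper fractions with
# denominator <= 10 (Project Euler 72).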
if __name__ == "__main__":
print(solution())
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__ : List[str] ='''\
Text data.
Second line of data.'''
lowerCAmelCase__ : Tuple ='''file'''
@pytest.fixture(scope='session' )
def __lowercase ( a__ ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / (FILE_PATH + """.zstd""")
__SCREAMING_SNAKE_CASE = bytes(lowerCAmelCase_ , 'utf-8' )
with zstd.open(lowerCAmelCase_ , 'wb' ) as f:
f.write(lowerCAmelCase_ )
return path
@pytest.fixture
def __lowercase ( a__ ) -> int:
with open(os.path.join(tmpfs.local_root_dir , lowerCAmelCase_ ) , 'w' ) as f:
f.write(lowerCAmelCase_ )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__ ) -> Any:
__SCREAMING_SNAKE_CASE = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
__SCREAMING_SNAKE_CASE = input_paths[compression_format]
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = DownloadConfig(cache_dir=lowerCAmelCase_ , extract_compressed_file=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ )
with open(lowerCAmelCase_ ) as f:
__SCREAMING_SNAKE_CASE = f.read()
with open(lowerCAmelCase_ ) as f:
__SCREAMING_SNAKE_CASE = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def __lowercase ( a__ , a__ , a__ , a__ , a__ ) -> Tuple:
__SCREAMING_SNAKE_CASE = """custom_cache"""
__SCREAMING_SNAKE_CASE = """custom_extracted_dir"""
__SCREAMING_SNAKE_CASE = tmp_path / """custom_extracted_path"""
if default_extracted:
__SCREAMING_SNAKE_CASE = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , lowerCAmelCase_ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(lowerCAmelCase_ ) )
__SCREAMING_SNAKE_CASE = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__SCREAMING_SNAKE_CASE = xz_file
__SCREAMING_SNAKE_CASE = (
DownloadConfig(extract_compressed_file=lowerCAmelCase_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase_ )
)
__SCREAMING_SNAKE_CASE = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ )
assert Path(lowerCAmelCase_ ).parent.parts[-2:] == expected
def __lowercase ( a__ ) -> Optional[int]:
# absolute path
__SCREAMING_SNAKE_CASE = str(Path(lowerCAmelCase_ ).resolve() )
assert cached_path(lowerCAmelCase_ ) == text_file
# relative path
__SCREAMING_SNAKE_CASE = str(Path(lowerCAmelCase_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowerCAmelCase_ ) == text_file
def __lowercase ( a__ ) -> str:
# absolute path
__SCREAMING_SNAKE_CASE = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(lowerCAmelCase_ ):
cached_path(lowerCAmelCase_ )
# relative path
__SCREAMING_SNAKE_CASE = """./__missing_file__.txt"""
with pytest.raises(lowerCAmelCase_ ):
cached_path(lowerCAmelCase_ )
def __lowercase ( a__ ) -> Any:
__SCREAMING_SNAKE_CASE = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(lowerCAmelCase_ ) as f:
__SCREAMING_SNAKE_CASE = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCAmelCase_ )
def __lowercase ( ) -> Tuple:
with pytest.raises(lowerCAmelCase_ ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCAmelCase_ )
def __lowercase ( a__ ) -> List[Any]:
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / """file.html"""
with pytest.raises(lowerCAmelCase_ ):
http_get('https://huggingface.co' , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCAmelCase_ )
def __lowercase ( a__ ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / """file.html"""
with pytest.raises(lowerCAmelCase_ ):
ftp_get('ftp://huggingface.co' , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCAmelCase_ )
def __lowercase ( a__ ) -> int:
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / """file.html"""
with pytest.raises(lowerCAmelCase_ ):
fsspec_get('s3://huggingface.co' , temp_file=lowerCAmelCase_ )
with pytest.raises(lowerCAmelCase_ ):
fsspec_head('s3://huggingface.co' )
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
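# Example: heaps([1, 2, 3]) yields all 3! = 6 permutations, in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]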
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase : Optional[Any] = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of the given length (Project Euler 145 helper)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count the reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
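# Known value (Project Euler 145): solution(3) == 120, i.e. there are 120
# reversible numbers below one thousand.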
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : str =UnCLIPImageVariationPipeline
lowercase : Optional[Any] =IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
lowercase : List[str] =IMAGE_VARIATION_BATCH_PARAMS
lowercase : Optional[int] =[
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 100
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
return CLIPTextModelWithProjection(lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
return CLIPVisionModelWithProjection(lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
lowerCamelCase_ =UnCLIPTextProjModel(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
lowerCamelCase_ =UNetaDConditionModel(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(1 )
lowerCamelCase_ =UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_decoder
lowerCamelCase_ =self.dummy_text_proj
lowerCamelCase_ =self.dummy_text_encoder
lowerCamelCase_ =self.dummy_tokenizer
lowerCamelCase_ =self.dummy_super_res_first
lowerCamelCase_ =self.dummy_super_res_last
lowerCamelCase_ =UnCLIPScheduler(
variance_type='''learned_range''', prediction_type='''epsilon''', num_train_timesteps=1_000, )
lowerCamelCase_ =UnCLIPScheduler(
variance_type='''fixed_small_log''', prediction_type='''epsilon''', num_train_timesteps=1_000, )
lowerCamelCase_ =CLIPImageProcessor(crop_size=32, size=32 )
lowerCamelCase_ =self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0, lowerCAmelCase=True ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
if pil_image:
lowerCamelCase_ =input_image * 0.5 + 0.5
lowerCamelCase_ =input_image.clamp(0, 1 )
lowerCamelCase_ =input_image.cpu().permute(0, 2, 3, 1 ).float().numpy()
lowerCamelCase_ =DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =output.images
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =pipe(
**lowerCAmelCase, return_dict=lowerCAmelCase, )[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ =np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =output.images
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =pipe(
**lowerCAmelCase, return_dict=lowerCAmelCase, )[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ =np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =[
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =output.images
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =[
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
lowerCamelCase_ =pipe(
**lowerCAmelCase, return_dict=lowerCAmelCase, )[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCamelCase_ =np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.device('''cpu''' )
class __UpperCamelCase :
lowercase : Union[str, Any] =1
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe.decoder.dtype
lowerCamelCase_ =1
lowerCamelCase_ =(
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCamelCase_ =pipe.prepare_latents(
lowerCAmelCase, dtype=lowerCAmelCase, device=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, scheduler=DummyScheduler() )
lowerCamelCase_ =(
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCamelCase_ =pipe.prepare_latents(
lowerCAmelCase, dtype=lowerCAmelCase, device=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, scheduler=DummyScheduler() )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
lowerCamelCase_ =pipe(
**lowerCAmelCase, decoder_latents=lowerCAmelCase, super_res_latents=lowerCAmelCase ).images
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase, pil_image=lowerCAmelCase )
# Don't pass image, instead pass embedding
lowerCamelCase_ =pipeline_inputs.pop('''image''' )
lowerCamelCase_ =pipe.image_encoder(lowerCAmelCase ).image_embeds
lowerCamelCase_ =pipe(
**lowerCAmelCase, decoder_latents=lowerCAmelCase, super_res_latents=lowerCAmelCase, image_embeddings=lowerCAmelCase, ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCamelCase_ =1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=lowerCAmelCase, expected_max_diff=lowerCAmelCase )
@skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type="np", )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
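

# Minimal sketch (an assumption, not the diffusers implementation) of what an
# `assert_mean_pixel_difference`-style check does: compare two images by their
# average absolute pixel deviation against a tolerance.
def _mean_pixel_difference_sketch(image, expected_image, expected_max_diff=10):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < expected_max_diff, f"Images deviate by {avg_diff} pixels on average"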
| 75
|
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a given x[n]."""
        return 0.0
def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Get bounds for plotting FFT results."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest
def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()
def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
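

# Example implementation (illustrative, not part of the original module) of a
# filter satisfying the FilterType protocol above: a simple moving-average FIR
# filter, usable with show_frequency_response / show_phase_response.
class MovingAverageFilter:
    def __init__(self, taps: int = 5) -> None:
        self.taps = taps
        self.history: list[float] = []

    def process(self, sample: float) -> float:
        """Calculate y[n] as the mean of the last `taps` input samples."""
        self.history.append(sample)
        if len(self.history) > self.taps:
            self.history.pop(0)
        return sum(self.history) / self.taps


# Example usage:
# show_frequency_response(MovingAverageFilter(), samplerate=48000)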
| 75
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"


def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3d point to a 2d drawable point using a perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a point around a certain axis by a certain angle (in degrees)."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
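    # Illustrative composition (added example): rotate a point about the z axis,
    # then project it onto 2D with the two helpers above.
    rx, ry, rz = rotate(1.0, 0.0, 0.0, "z", 90.0)
    print(f"{convert_to_2d(rx, ry, rz, 10.0, 10.0) = }")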
| 357
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        }, )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification", )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ] )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 322
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
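
# Illustrative sketch (an assumption, not the transformers API): the same
# lazy-import idea can be expressed with PEP 562 module-level __getattr__,
# where the first attribute access imports the submodule and resolves the name:
#
#     import importlib
#
#     _LAZY_ATTRS = {"SwinModel": ".modeling_swin"}
#
#     def __getattr__(name):
#         if name in _LAZY_ATTRS:
#             module = importlib.import_module(_LAZY_ATTRS[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")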
| 89
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf" )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer) )

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
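

# Illustration (hypothetical shard name, added example) of the naming convention
# count_samples() relies on: the final number before ".tfrecord" is the
# per-shard sample count extracted by the regex above.
def _count_samples_example():
    filename = "wikitext-train-0-2048.tfrecord"
    match = re.search(r"-\d+-(\d+)\.tfrecord", filename)
    assert match is not None and int(match.group(1)) == 2048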
| 63
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase = PILImageResampling.BILINEAR , _lowercase = None , **_lowercase , )-> np.ndarray:
UpperCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" in size:
UpperCamelCase_ = get_resize_output_image_size(_lowercase , size["shortest_edge"] , default_to_square=_lowercase )
elif "height" in size and "width" in size:
UpperCamelCase_ = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase = None , **_lowercase , )-> np.ndarray:
UpperCamelCase_ = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(_lowercase , size=(size["height"], size["width"]) , data_format=_lowercase , **_lowercase )
    def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase , _lowercase = None , **_lowercase , )-> np.ndarray:
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCamelCase_ = to_numpy_array(_lowercase )
if do_resize:
UpperCamelCase_ = self.resize(image=_lowercase , size=_lowercase , resample=_lowercase )
if do_center_crop:
UpperCamelCase_ = self.center_crop(_lowercase , size=_lowercase )
if do_rescale:
UpperCamelCase_ = self.rescale(image=_lowercase , scale=_lowercase , offset=_lowercase )
if do_normalize:
UpperCamelCase_ = self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase )
UpperCamelCase_ = to_channel_dimension_format(_lowercase , _lowercase )
return image
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = ChannelDimension.FIRST , **_lowercase , )-> PIL.Image.Image:
UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ = resample if resample is not None else self.resample
UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_ = offset if offset is not None else self.offset
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_ = image_std if image_std is not None else self.image_std
UpperCamelCase_ = size if size is not None else self.size
UpperCamelCase_ = get_size_dict(_lowercase , default_to_square=_lowercase )
UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_ = get_size_dict(_lowercase , param_name="crop_size" )
if not valid_images(_lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCamelCase_ = make_batched(_lowercase )
UpperCamelCase_ = [
[
self._preprocess_image(
image=_lowercase , do_resize=_lowercase , size=_lowercase , resample=_lowercase , do_center_crop=_lowercase , crop_size=_lowercase , do_rescale=_lowercase , rescale_factor=_lowercase , offset=_lowercase , do_normalize=_lowercase , image_mean=_lowercase , image_std=_lowercase , data_format=_lowercase , )
for img in video
]
for video in videos
]
UpperCamelCase_ = {"pixel_values": videos}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
| 60
|
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares, using closed-form formulas.
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
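

def solution_brute_force(n: int = 100) -> int:
    """Brute-force cross-check (added example) of the closed-form solution above."""
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert solution(10) == solution_brute_force(10) == 2640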
if __name__ == "__main__":
print(F'''{solution() = }''')
| 60
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowerCamelCase = {0: """batch"""}
__lowerCamelCase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__lowerCamelCase = {0: """batch""", 1: """decoder_sequence"""}
__lowerCamelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase_ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCamelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
__lowerCamelCase, __lowerCamelCase = self.num_layers
for i in range(UpperCamelCase_ ):
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
__lowerCamelCase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase = super().outputs
else:
__lowerCamelCase = super(UpperCamelCase_ , self ).outputs
if self.use_past:
__lowerCamelCase, __lowerCamelCase = self.num_layers
for i in range(UpperCamelCase_ ):
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
__lowerCamelCase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[TensorType] = None , ):
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Generate decoder inputs
__lowerCamelCase = seq_length if not self.use_past else 1
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowerCamelCase = dict(**UpperCamelCase_ , **UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCamelCase, __lowerCamelCase = common_inputs["""input_ids"""].shape
__lowerCamelCase = common_inputs["""decoder_input_ids"""].shape[1]
__lowerCamelCase, __lowerCamelCase = self.num_attention_heads
__lowerCamelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase = decoder_seq_length + 3
__lowerCamelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCamelCase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 )
__lowerCamelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCamelCase, __lowerCamelCase = self.num_layers
__lowerCamelCase = min(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers
__lowerCamelCase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCamelCase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
torch.zeros(UpperCamelCase_ ),
) )
# TODO: test this.
__lowerCamelCase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCamelCase_ , UpperCamelCase_ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) )
return common_inputs
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[TensorType] = None , ):
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__lowerCamelCase, __lowerCamelCase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCamelCase = seqlen + 2
__lowerCamelCase, __lowerCamelCase = self.num_layers
__lowerCamelCase, __lowerCamelCase = self.num_attention_heads
__lowerCamelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase = common_inputs["""attention_mask"""].dtype
__lowerCamelCase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 )
__lowerCamelCase = [
(torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ )
]
return common_inputs
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCamelCase = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCamelCase = tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
__lowerCamelCase = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
__lowerCamelCase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCamelCase = dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) )
return common_inputs
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
elif self.task == "causal-lm":
__lowerCamelCase = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
else:
__lowerCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ )
return common_inputs
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase = super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
__lowerCamelCase = super(UpperCamelCase_ , self )._flatten_past_key_values_(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
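

# Illustrative shape arithmetic (added example, using the defaults above:
# d_model=512 and 16 attention heads): each cached self-attention key/value
# tensor has shape (batch, num_heads, past_seq_len, d_model // num_heads).
def _past_key_values_shape_example(batch: int = 2, past_seq_len: int = 8) -> tuple:
    d_model, num_heads = 512, 16
    return (batch, num_heads, past_seq_len, d_model // num_heads)  # (2, 16, 8, 32)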
| 12
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 231
| 0
|
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform max pooling on a square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling on a square 2D matrix (image)."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1

        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
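    # Quick numeric check (added example) of both pooling functions on a 4x4
    # matrix with a 2x2 window and stride 2:
    mat = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
    assert (maxpooling(mat, size=2, stride=2) == np.array([[6, 8], [14, 16]])).all()
    assert (avgpooling(mat, size=2, stride=2) == np.array([[3, 5], [11, 13]])).all()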
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 371
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                } ), reference_urls=["https://huggingface.co/docs/transformers/perplexity"], )
def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : int = 1_6 , snake_case_ : bool = True , snake_case_ : int=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_UpperCAmelCase = "cuda"
else:
_UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu"
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(snake_case_ )
_UpperCAmelCase = model.to(snake_case_ )
_UpperCAmelCase = AutoTokenizer.from_pretrained(snake_case_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(snake_case_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCAmelCase = model.config.max_length - 1
else:
_UpperCAmelCase = model.config.max_length
_UpperCAmelCase = tokenizer(
snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , return_tensors="pt" , return_attention_mask=snake_case_ , ).to(snake_case_ )
_UpperCAmelCase = encodings["input_ids"]
_UpperCAmelCase = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCAmelCase = []
_UpperCAmelCase = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(snake_case_ ) , snake_case_ ) ):
_UpperCAmelCase = min(start_index + batch_size , len(snake_case_ ) )
_UpperCAmelCase = encoded_texts[start_index:end_index]
_UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
_UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(snake_case_ )
_UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_UpperCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(snake_case_ ), attn_mask] , dim=1 )
_UpperCAmelCase = encoded_batch
with torch.no_grad():
_UpperCAmelCase = model(snake_case_ , attention_mask=snake_case_ ).logits
_UpperCAmelCase = out_logits[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = attn_mask[..., 1:].contiguous()
_UpperCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , snake_case_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(snake_case_ )}
| 156
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : Optional[int] )-> Dict:
'''simple docstring'''
A__ = 0
@slow
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowercase_ ),0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
self.assertIsInstance(lowercase_,(GPT2Tokenizer, GPT2TokenizerFast) )
self.assertGreater(len(lowercase_ ),0 )
def snake_case__ ( self : int )-> List[Any]:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size,1_2 )
def snake_case__ ( self : Union[str, Any] )-> str:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size,2_0 )
def snake_case__ ( self : str )-> List[str]:
'''simple docstring'''
A__ = AutoConfig.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
# Check that tokenizer_type ≠ model_type
A__ = AutoTokenizer.from_pretrained(lowercase_,config=lowercase_ )
self.assertIsInstance(lowercase_,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size,1_2 )
def snake_case__ ( self : Tuple )-> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt',os.path.join(lowercase_,'vocab.txt' ) )
A__ = AutoTokenizer.from_pretrained(lowercase_,tokenizer_type='bert',use_fast=lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json',os.path.join(lowercase_,'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt',os.path.join(lowercase_,'merges.txt' ) )
A__ = AutoTokenizer.from_pretrained(lowercase_,tokenizer_type='gpt2',use_fast=lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
@require_tokenizers
def snake_case__ ( self : str )-> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt',os.path.join(lowercase_,'vocab.txt' ) )
A__ = AutoTokenizer.from_pretrained(lowercase_,tokenizer_type='bert' )
self.assertIsInstance(lowercase_,lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json',os.path.join(lowercase_,'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt',os.path.join(lowercase_,'merges.txt' ) )
A__ = AutoTokenizer.from_pretrained(lowercase_,tokenizer_type='gpt2' )
self.assertIsInstance(lowercase_,lowercase_ )
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
with pytest.raises(lowercase_ ):
AutoTokenizer.from_pretrained('./',tokenizer_type='xxx' )
@require_tokenizers
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
A__ = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowercase_,(BertTokenizer, BertTokenizerFast) )
if isinstance(lowercase_,lowercase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case,lowercase_ )
else:
self.assertEqual(tokenizer.do_lower_case,lowercase_ )
self.assertEqual(tokenizer.model_max_length,5_1_2 )
@require_tokenizers
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase_,'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier',):
A__ = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def snake_case__ ( self : Tuple )-> int:
'''simple docstring'''
A__ = TOKENIZER_MAPPING.values()
A__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase_ )
@require_tokenizers
def snake_case__ ( self : List[str] )-> Optional[Any]:
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased',use_fast=lowercase_ ),lowercase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ),lowercase_ )
@require_tokenizers
def snake_case__ ( self : Any )-> List[Any]:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('distilbert-base-uncased',do_lower_case=lowercase_ )
A__ = 'Hello, world. How are you?'
A__ = tokenizer.tokenize(lowercase_ )
self.assertEqual('[UNK]',tokens[0] )
A__ = AutoTokenizer.from_pretrained('microsoft/mpnet-base',do_lower_case=lowercase_ )
A__ = tokenizer.tokenize(lowercase_ )
self.assertEqual('[UNK]',tokens[0] )
@require_tokenizers
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowercase_ ),lowercase_ )
self.assertEqual(tokenizer.model_max_length,5_1_2 )
self.assertEqual(tokenizer.vocab_size,3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token,'[UNK]' )
self.assertEqual(tokenizer.padding_side,'right' )
self.assertEqual(tokenizer.truncation_side,'right' )
def snake_case__ ( self : str )-> List[str]:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size,1_2 )
def snake_case__ ( self : int )-> Optional[int]:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase_,lowercase_ )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
A__ = get_tokenizer_config('bert-base-cased' )
A__ = config.pop('_commit_hash',lowercase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase_,{'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
A__ = get_tokenizer_config(lowercase_ )
self.assertDictEqual(lowercase_,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
A__ = AutoTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
A__ = get_tokenizer_config(lowercase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'],'BertTokenizer' )
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
try:
AutoConfig.register('custom',lowercase_ )
AutoTokenizer.register(lowercase_,slow_tokenizer_class=lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_,slow_tokenizer_class=lowercase_ )
A__ = CustomTokenizer.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
try:
AutoConfig.register('custom',lowercase_ )
# Can register in two steps
AutoTokenizer.register(lowercase_,slow_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig],(CustomTokenizer, None) )
AutoTokenizer.register(lowercase_,fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig],(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase_,slow_tokenizer_class=lowercase_,fast_tokenizer_class=lowercase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig],(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoTokenizer.register(lowercase_,fast_tokenizer_class=lowercase_ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter
# for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = BertTokenizerFast.from_pretrained(lowercase_ )
bert_tokenizer.save_pretrained(lowercase_ )
A__ = CustomTokenizerFast.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
A__ = AutoTokenizer.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
A__ = AutoTokenizer.from_pretrained(lowercase_,use_fast=lowercase_ )
self.assertIsInstance(lowercase_,lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : Tuple )-> Optional[Any]:
'''simple docstring'''
with self.assertRaises(lowercase_ ):
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_ )
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
A__ = AutoTokenizer.from_pretrained(lowercase_,trust_remote_code=lowercase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__,'NewTokenizerFast' )
# Test we can also load the slow version
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_,use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase_ )
A__ = AutoTokenizer.from_pretrained(lowercase_,trust_remote_code=lowercase_,use_fast=lowercase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__,'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__,'NewTokenizer' )
@require_tokenizers
def snake_case__ ( self : Dict )-> int:
'''simple docstring'''
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('custom',lowercase_ )
AutoTokenizer.register(lowercase_,slow_tokenizer_class=lowercase_ )
AutoTokenizer.register(lowercase_,fast_tokenizer_class=lowercase_ )
# If remote code is not set, the default is to use local
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer',use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_,use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer',trust_remote_code=lowercase_,use_fast=lowercase_ )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : Optional[int] )-> Dict:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy',trust_remote_code=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizerFast' )
# Test we can also load the slow version
A__ = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy',trust_remote_code=lowercase_,use_fast=lowercase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__,'NewTokenizer' )
def snake_case__ ( self : Dict )-> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase_,'bert-base is not a local folder and is not a valid model identifier' ):
A__ = AutoTokenizer.from_pretrained('bert-base' )
def snake_case__ ( self : Optional[int] )-> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase_,r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
A__ = AutoTokenizer.from_pretrained(lowercase_,revision='aaaaaa' )
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count,0 )
self.assertEqual(counter.head_request_count,1 )
self.assertEqual(counter.other_request_count,0 )
| 7
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__magic_name__: Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
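# `_LazyModule` defers the actual submodule imports (and the torch dependency gated
# above) until an attribute is first accessed, keeping `import transformers.models.clap` cheap.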
| 342
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientnet'] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 151
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
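    # Given set_src_lang_special_tokens below, a single encoded sequence therefore looks
    # like: [src_lang_code_token] X [eos]  (prefix_tokens + token_ids + suffix_tokens).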
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f'{save_directory} should be a directory')
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 151
| 1
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
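# Usage sketch (added; names as reconstructed above, `my_gen_fn` is hypothetical):
#   reader = GeneratorDatasetInputStream(generator=my_gen_fn, gen_kwargs={"n": 10})
#   ds = reader.read()   # IterableDataset if streaming=True, else a map-style Dataset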
| 192
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 192
| 1
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether a 0-9 pandigital tuple satisfies the Project Euler 43
    substring-divisibility properties (explained in the note below)."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
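# The checks above mirror Project Euler 43 for a pandigital number d1...d10:
# d2d3d4 % 2 == 0 reduces to the last digit num[3] being even, d3d4d5 % 3 == 0
# reduces to the digit sum num[2] + num[3] + num[4], d4d5d6 % 5 == 0 reduces to
# num[5] being 0 or 5, and the remaining 3-digit windows starting at num[4]
# must be divisible by 7, 11, 13 and 17 respectively.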
def solution(n: int = 10) -> int:
    """Sum of all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 352
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Length of a circular arc spanning `angle` degrees on a circle of radius `radius`."""
    return 2 * pi * radius * (angle / 360)
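# e.g. (added) arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.71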
if __name__ == "__main__":
print(arc_length(90, 10))
| 62
| 0
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowercase_ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowercase_ = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowercase_ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
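    # Note (added): `_compute` re-wraps the flat `references` list into the nested
    # article/paragraphs/qas layout that the official SQuAD v1 scoring script expects,
    # then delegates the actual F1/EM computation to that script's `evaluate`.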
| 58
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = 'hidden_states'

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
lowercase_ : Optional[Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
lowercase_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
assert hidden_states is not None, "Make sure output is not None"
    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        model = model.to(torch_device)

        if hasattr(model, 'set_default_attn_processor'):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
[37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
# fmt: on
] )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior')
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]
assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 239
| 0
|
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        # `carry` holds the overflowing bits; XOR adds without carrying
        carry = first & second
        first ^= second
        second = carry << 1
    return first
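# Worked trace (added for illustration): add(5, 3)
#   carry = 5 & 3 = 1; first = 5 ^ 3 = 6; second = 1 << 1 = 2
#   carry = 6 & 2 = 2; first = 6 ^ 2 = 4; second = 2 << 1 = 4
#   carry = 4 & 4 = 4; first = 4 ^ 4 = 0; second = 4 << 1 = 8
#   carry = 0 & 8 = 0; first = 0 ^ 8 = 8; second = 0        -> returns 8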
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = int(input("""Enter the first number: """).strip())
__SCREAMING_SNAKE_CASE = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 351
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid, from its density and bulk modulus."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
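# Newton-Laplace relation (added note): c = sqrt(K / rho), with K the bulk modulus in Pa
# and rho the density in kg/m^3; e.g. water: sqrt(2.15e9 / 1000) ~= 1466 m/s.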
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/fnet-base': 512,
'google/fnet-large': 512,
}
SPIECE_UNDERLINE = '▁'
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'token_type_ids']
    slow_tokenizer_class = FNetTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
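    # Resulting layout (added note): single sequence -> [CLS] A [SEP] with all-zero
    # token type ids; pair -> [CLS] A [SEP] B [SEP] with 0s over "[CLS] A [SEP]"
    # and 1s over "B [SEP]".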
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 325
|
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError('Unsupported framework')
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, f"""No mask_token ({self.tokenizer.mask_token}) found on the input""",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p]), 'sequence': sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True,
                )['input_ids']
                if len(input_ids) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        'We cannot replace it with anything meaningful, ignoring it' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""" )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError('At least one target must be provided when passed.')
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask', self.model.base_model_prefix, 'The tokenizer does not define a `mask_token`.' )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
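# Minimal usage sketch (added; `pipeline` is the standard transformers entry point and
# "bert-base-uncased" stands in for any masked-LM checkpoint):
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=3, targets=["capital", "city"])
#   # -> list of dicts with keys "score", "token", "token_str", "sequence"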
| 286
| 0
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowercase ( unittest.TestCase , ToolTesterMixin ):
    def setUp( self ):
        '''Load the text-to-speech tool once per test.'''
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()
    def test_exact_match_arg( self ):
        '''Positional-argument invocation of the tool.'''
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
    def test_exact_match_kwarg( self ):
        '''Keyword-argument invocation, mirroring the positional test above.'''
        torch.manual_seed(0 )
        result = self.tool(text="hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
| 357
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name ):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
    # the assign_first_stage / with_box_refine / two_stage flags follow the upstream DETA conversion script
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def create_rename_keys(config ):
SCREAMING_SNAKE_CASE = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v(state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
    else:
        raise ValueError(F"Model name {model_name} not supported" )
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # print the name and shape of each parameter of the original state dict
    for name, param in state_dict.items():
        print(name , param.shape )
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # fix some prefixes (target key names follow the upstream DETA conversion script)
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("transformer.decoder" , "model.decoder" )] = val
        if "input_proj" in key:
            val = state_dict.pop(key )
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key )
            state_dict[key.replace("transformer" , "model" )] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device )
    # load image processor
    processor = DetaImageProcessor(format="coco_detection" )
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device ) )
    # verify logits
    print("Logits:" , outputs.logits[0, :3, :3] )
    print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(device ) , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(device ) , atol=1e-4 )
    print("Everything ok!" )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub..." )
        model.push_to_hub(F"jozhang97/{model_name}" )
        processor.push_to_hub(F"jozhang97/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
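# Example invocation (added for illustration; the script filename and output directory are
# hypothetical placeholders):
#
#   python convert_deta_checkpoint.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large-hf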
| 206
| 0
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomized in-place quick sort; returns the number of comparisons performed."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        # move the randomly chosen pivot value to the end
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """Lomuto partition around a random pivot; returns (pivot position, comparison count)."""
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
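# Deterministic sanity check (added for illustration): the sort must produce a
# non-decreasing array, and for n elements the comparison count is bounded by n*(n-1)/2.
_original = [3.0, 1.0, 2.0, 5.0, 4.0]
_demo = _original.copy()
_demo_count = _in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == sorted(_original)
assert _demo_count <= len(_original) * (len(_original) - 1) // 2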
| 134
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__( self , vocab: Dict[str, int] , merges: List[str] , max_length: int = None , pad_token_id: int = None ) -> None:
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
    @classmethod
    def from_tokenizer( cls , tokenizer: GPTaTokenizer , *args , **kwargs ):
        '''Build the in-graph tokenizer layer from an existing GPT-2 tokenizer.'''
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , *init_inputs , **kwargs ):
        '''Build the layer from a pretrained tokenizer identifier or path.'''
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
    @classmethod
    def from_config( cls , config ):
        '''Rebuild the layer from the dict produced by `get_config`.'''
        return cls(**config )
    def get_config( self ):
        '''Keras serialization hook.'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call( self , x , max_length: int = None ):
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids , attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 134
| 1
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
UpperCamelCase__ = TypeVar("T")
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""" )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
def __repr__( self ):
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
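    # Trace of the demo above (added for clarity): with capacity 4, referring A, 2, 3 fills
    # the cache; re-referring "A" moves it to the front; 4 fills the last slot; 5 then evicts
    # the least recently used key (2), leaving [5, 4, 'A', 3], which the assertion checks.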
| 354
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds , labels ):
    """Fraction of predictions matching the labels."""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
"""simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , a__ )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
                    writer.write("""%s = %s\n""" % (key, value) )
        results.update(result )
    return results
def _mp_fn(index ):
    """For xla_spawn (TPUs)."""
    main()
if __name__ == "__main__":
main()
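# Example invocation (added for illustration; task name, data path, and model are placeholders):
#
#   python run_multiple_choice.py \
#       --task_name swag --data_dir ./data/swag \
#       --model_name_or_path bert-base-uncased \
#       --output_dir ./swag_output --do_train --do_eval \
#       --max_seq_length 128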
| 87
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer ):
    '''Trainer subclass for sequence-to-sequence models (legacy examples).'''
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                """ padding..""" )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ["""bias""", """LayerNorm.weight"""]
            optimizer_grouped_parameters = [
                {
                    """params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    """weight_decay""": self.args.weight_decay,
                },
                {
                    """params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    """weight_decay""": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"""scale_parameter""": False, """relative_step""": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    """betas""": (self.args.adam_beta1, self.args.adam_beta2),
                    """eps""": self.args.adam_epsilon,
                }
            optimizer_kwargs["""lr"""] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop("""labels""" )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            """max_length""": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            """num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs["""max_length"""] )
        labels = inputs.pop("""labels""" )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs["""max_length"""] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
                f' padded to `max_length`={max_length}' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
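# Usage sketch (added for illustration; model/dataset/args names are placeholders):
#
#   trainer = Seq2SeqTrainer(
#       config=None,                 # inferred from a PreTrainedModel
#       data_args=data_args,         # supplies val_max_target_length / eval_beams
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#   )
#   trainer.train()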
| 58
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features ):
    """Pick a Parquet row group size suited to the dataset's feature types."""
    batch_size = np.inf
    def set_batch_size(feature ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader ):
    """Reads a Dataset from one or several Parquet files."""
    def __init__( self , path_or_paths , split=None , features=None , cache_dir=None , keep_in_memory=False , streaming=False , num_proc=None , **kwargs , ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter:
    """Writes a Dataset to a Parquet file or binary buffer."""
    def __init__( self , dataset , path_or_buf , batch_size=None , **parquet_writer_kwargs , ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self ) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self , file_obj , batch_size , **parquet_writer_kwargs ) -> int:
        """Write the dataset to `file_obj` in batches as Parquet; return the number of bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
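# Usage sketch (added for illustration; "demo.parquet" is a placeholder path): these classes
# back the public `Dataset.to_parquet` / `Dataset.from_parquet` helpers.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_parquet("demo.parquet")                         # routes through ParquetDatasetWriter
#   round_tripped = Dataset.from_parquet("demo.parquet")  # routes through ParquetDatasetReader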
| 186
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
UpperCAmelCase__ = "src/transformers"
UpperCAmelCase__ = "docs/source/en/tasks"
def _find_text_in_file(filename , start_prompt , end_prompt ):
    '''Return the text between `start_prompt` and `end_prompt`, plus its line indices.'''
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        _UpperCAmelCase = f.readlines()
        lines = _UpperCAmelCase
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide ):
    '''Return the list of models supporting a given task guide, formatted as doc links.'''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task(task_guide , overwrite=False ):
    '''Sync the auto-generated model list of a task guide with the auto-model mappings.'''
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                ' to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
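# Example invocations (added for illustration), run from the repository root as the header notes:
#
#   python utils/check_task_guides.py                      # report stale model lists
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite them in place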
| 364
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
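# Usage sketch (added for illustration): the `_LazyModule` indirection means a symbol's
# backing module (and its framework dependency) is only imported on first attribute access.
#
#   from transformers import LxmertConfig        # cheap: only the config module loads
#   config = LxmertConfig()
#   # `from transformers import LxmertModel` would additionally require torch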
| 290
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class snake_case_( unittest.TestCase ):
    def lowerCamelCase__ ( self : List[Any] ):
        input_dict = {
            '''task_specific_params''': {
                '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
                '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
                '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
            }
        }
        expected_dict = {
            '''task_specific_params.summarization.length_penalty''': 1.0,
            '''task_specific_params.summarization.max_length''': 1_2_8,
            '''task_specific_params.summarization.min_length''': 1_2,
            '''task_specific_params.summarization.num_beams''': 4,
            '''task_specific_params.summarization_cnn.length_penalty''': 2.0,
            '''task_specific_params.summarization_cnn.max_length''': 1_4_2,
            '''task_specific_params.summarization_cnn.min_length''': 5_6,
            '''task_specific_params.summarization_cnn.num_beams''': 4,
            '''task_specific_params.summarization_xsum.length_penalty''': 1.0,
            '''task_specific_params.summarization_xsum.max_length''': 6_2,
            '''task_specific_params.summarization_xsum.min_length''': 1_1,
            '''task_specific_params.summarization_xsum.num_beams''': 6,
        }
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ ) , x.transpose() ) )
lowerCAmelCase : List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Optional[int] = np.random.randn(3 , 4 )
lowerCAmelCase : Any = torch.tensor(UpperCamelCase_ )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ ) , transpose(UpperCamelCase_ ).numpy() ) )
lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase : Tuple = torch.tensor(UpperCamelCase_ )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ , axes=(1, 2, 0) ) , transpose(UpperCamelCase_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase : Any = np.random.randn(3 , 4 )
lowerCAmelCase : str = tf.constant(UpperCamelCase_ )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ ) , transpose(UpperCamelCase_ ).numpy() ) )
lowerCAmelCase : Optional[Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase : List[Any] = tf.constant(UpperCamelCase_ )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ , axes=(1, 2, 0) ) , transpose(UpperCamelCase_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase : Union[str, Any] = jnp.array(UpperCamelCase_ )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ ) , np.asarray(transpose(UpperCamelCase_ ) ) ) )
lowerCAmelCase : Tuple = np.random.randn(3 , 4 , 5 )
lowerCAmelCase : Dict = jnp.array(UpperCamelCase_ )
self.assertTrue(np.allclose(transpose(UpperCamelCase_ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCamelCase_ , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(UpperCamelCase_ , (4, 3) ) , np.reshape(UpperCamelCase_ , (4, 3) ) ) )
lowerCAmelCase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(UpperCamelCase_ , (1_2, 5) ) , np.reshape(UpperCamelCase_ , (1_2, 5) ) ) )
@require_torch
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : Optional[Any] = np.random.randn(3 , 4 )
lowerCAmelCase : Optional[Any] = torch.tensor(UpperCamelCase_ )
self.assertTrue(np.allclose(reshape(UpperCamelCase_ , (4, 3) ) , reshape(UpperCamelCase_ , (4, 3) ).numpy() ) )
lowerCAmelCase : List[Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase : Union[str, Any] = torch.tensor(UpperCamelCase_ )
self.assertTrue(np.allclose(reshape(UpperCamelCase_ , (1_2, 5) ) , reshape(UpperCamelCase_ , (1_2, 5) ).numpy() ) )
@require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 60
|
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(_snake_case , _snake_case ) ) )
def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray ):
if dataset.ndim != value_array.ndim:
lowerCAmelCase : List[Any] = (
'''Wrong input data\'s dimensions... '''
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(_snake_case )
try:
if dataset.shape[1] != value_array.shape[1]:
lowerCAmelCase : Dict = (
'''Wrong input data\'s shape... '''
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(_snake_case )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
lowerCAmelCase : Optional[Any] = (
'''Input data have different datatype... '''
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(_snake_case )
lowerCAmelCase : str = []
for value in value_array:
lowerCAmelCase : int = euclidean(_snake_case , dataset[0] )
lowerCAmelCase : Union[str, Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
lowerCAmelCase : Any = euclidean(_snake_case , _snake_case )
if dist > temp_dist:
lowerCAmelCase : List[Any] = temp_dist
lowerCAmelCase : Tuple = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray ):
return np.dot(_snake_case , _snake_case ) / (norm(_snake_case ) * norm(_snake_case ))
if __name__ == "__main__":
import doctest
doctest.testmod()
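# A minimal usage sketch (illustrative only, assuming the functions above):
# dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
# value_array = np.array([[0.0, 1.0]])
# similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]
# cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 0.0]))  # -> ~0.7071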
| 60
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
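# Illustrative sketch (not part of the original file): ``attribute_map`` lets
# generic code read standard config names that alias the CTRL-specific ones.
# config = CTRLConfig(n_layer=2)
# assert config.num_hidden_layers == config.n_layer == 2
# assert config.hidden_size == config.n_embd == 1280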
| 252
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup


logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
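# Minimal usage sketch (illustrative; requires `bs4` to be installed):
# feature_extractor = MarkupLMFeatureExtractor()
# html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
# encoding = feature_extractor(html_string)
# encoding["nodes"]   # [["Title", "Hello world"]]
# encoding["xpaths"]  # [["/html/body/h1", "/html/body/p"]]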
| 252
| 1
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    """Pipeline for generating images from speech: Whisper transcribes the audio
    and the transcript is used as a Stable Diffusion prompt."""

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
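# Illustrative usage sketch (not part of the original file; checkpoint names and
# the custom-pipeline wiring are assumptions): Whisper transcribes the audio and
# the transcript becomes the Stable Diffusion prompt.
#
#   from diffusers import DiffusionPipeline
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#   speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=speech_model,
#       speech_processor=speech_processor,
#   )
#   image = pipe(audio_array, sampling_rate=16_000).images[0]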
| 224
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 292
| 0
|
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 174
|
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Return how many times ``num`` must be replaced by the product of its
    digits before reaching a single digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times ``num`` must be replaced by the sum of its digits
    before reaching a single digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
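# Worked examples (illustrative):
# multiplicative_persistence(39) == 3, since 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4.
# additive_persistence(199) == 3, since 199 -> 19 -> 10 -> 1.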
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
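# A minimal sketch (assuming only the public DDPMScheduler API) of the
# ancestral-sampling loop that test_full_loop_no_noise exercises above;
# `model` stands in for any noise-prediction network (hypothetical here).
#
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in reversed(range(len(scheduler))):
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample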
| 332
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams if they use the same letters, ignoring case and whitespace."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding dictionary entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
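# Quick illustrative checks:
# assert check_anagrams("Silent", "Listen") is True
# assert check_anagrams("This is a string", "Is this a string") is True
# assert check_anagrams("There", "Their") is False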
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 332
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
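# Worked example (illustrative): for L = 0.01 H and C = 1e-5 F,
# L*C = 1e-7 and sqrt(L*C) ~= 3.1623e-4, so f = 1 / (2*pi*3.1623e-4) ~= 503.3 Hz.
# resonant_frequency(0.01, 1e-5)  # -> ("Resonant frequency", 503.29...)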
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 48
| 1
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
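# Worked example (illustrative): for [1, 5, 3, 7, 2, 2, 6] the best non-adjacent
# picks are 5 + 7 + 6 = 18, while [1, 2, 3] gives 1 + 3 = 4.
# assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18
# assert maximum_non_adjacent_sum([1, 2, 3]) == 4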
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label).

    Produces Transformer inputs of shape (n_batch, 2, length) where, for each of the two candidate
    continuations k, input_ids[i, k, :] is
    [start_token] + story[:cap_length] + [delimiter_token] + cont_k[:cap_length] + [clf_token].
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
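# Illustrative note on the shapes produced above (toy numbers, not from the
# original script): for a dataset of n_batch examples with input_len = 40,
# pre_process_datasets yields one tuple per dataset of
# (input_ids, mc_token_ids, lm_labels, mc_labels) with shapes
# (n_batch, 2, 40), (n_batch, 2), (n_batch, 2, 40) and (n_batch,), where axis 1
# indexes the two candidate continuations.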
def main():
__UpperCamelCase =argparse.ArgumentParser()
parser.add_argument('--model_name' , type=SCREAMING_SNAKE_CASE__ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--eval_dataset' , type=SCREAMING_SNAKE_CASE__ , default='' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--num_train_epochs' , type=SCREAMING_SNAKE_CASE__ , default=3 )
parser.add_argument('--train_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=8 )
parser.add_argument('--eval_batch_size' , type=SCREAMING_SNAKE_CASE__ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=SCREAMING_SNAKE_CASE__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=SCREAMING_SNAKE_CASE__ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=SCREAMING_SNAKE_CASE__ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=SCREAMING_SNAKE_CASE__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=SCREAMING_SNAKE_CASE__ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=SCREAMING_SNAKE_CASE__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=SCREAMING_SNAKE_CASE__ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=SCREAMING_SNAKE_CASE__ , default=0.01 )
parser.add_argument('--lm_coef' , type=SCREAMING_SNAKE_CASE__ , default=0.9 )
parser.add_argument('--n_valid' , type=SCREAMING_SNAKE_CASE__ , default=3_74 )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
__UpperCamelCase =parser.parse_args()
print(SCREAMING_SNAKE_CASE__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
__UpperCamelCase =torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__UpperCamelCase =['_start_', '_delimiter_', '_classify_']
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
model.to(SCREAMING_SNAKE_CASE__ )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE__ : str ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE__ ) for o in obj]
logger.info('Encoding dataset...' )
__UpperCamelCase =load_rocstories_dataset(args.train_dataset )
__UpperCamelCase =load_rocstories_dataset(args.eval_dataset )
__UpperCamelCase =(train_dataset, eval_dataset)
__UpperCamelCase =tokenize_and_encode(SCREAMING_SNAKE_CASE__ )
# Compute the max input length for the Transformer
__UpperCamelCase =model.config.n_positions // 2 - 2
__UpperCamelCase =max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__UpperCamelCase =min(SCREAMING_SNAKE_CASE__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__UpperCamelCase =pre_process_datasets(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ )
__UpperCamelCase , __UpperCamelCase =tensor_datasets[0], tensor_datasets[1]
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.train_batch_size )
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =SequentialSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__UpperCamelCase =args.max_steps
__UpperCamelCase =args.max_steps // (len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps) + 1
else:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__UpperCamelCase =list(model.named_parameters() )
__UpperCamelCase =['bias', 'LayerNorm.bias', 'LayerNorm.weight']
__UpperCamelCase =[
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
__UpperCamelCase =AdamW(SCREAMING_SNAKE_CASE__ , lr=args.learning_rate , eps=args.adam_epsilon )
__UpperCamelCase =get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE__ , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE__ )
if args.do_train:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
__UpperCamelCase =0
__UpperCamelCase =0
__UpperCamelCase =tqdm(SCREAMING_SNAKE_CASE__ , desc='Training' )
for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__UpperCamelCase =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__UpperCamelCase ='Training loss: {:.2e} lr: {:.2e}'.format(SCREAMING_SNAKE_CASE__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__UpperCamelCase =model.module if hasattr(SCREAMING_SNAKE_CASE__ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =os.path.join(args.output_dir , SCREAMING_SNAKE_CASE__ )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE__ )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__UpperCamelCase =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__UpperCamelCase =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE__ )
if args.do_eval:
model.eval()
__UpperCamelCase , __UpperCamelCase =0, 0
__UpperCamelCase , __UpperCamelCase =0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE__ , desc='Evaluating' ):
__UpperCamelCase =tuple(t.to(SCREAMING_SNAKE_CASE__ ) for t in batch )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =batch
with torch.no_grad():
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =model(
SCREAMING_SNAKE_CASE__ , mc_token_ids=SCREAMING_SNAKE_CASE__ , lm_labels=SCREAMING_SNAKE_CASE__ , mc_labels=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =mc_logits.detach().cpu().numpy()
__UpperCamelCase =mc_labels.to('cpu' ).numpy()
__UpperCamelCase =accuracy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__UpperCamelCase =eval_loss / nb_eval_steps
__UpperCamelCase =eval_accuracy / nb_eval_examples
__UpperCamelCase =tr_loss / nb_tr_steps if args.do_train else None
__UpperCamelCase ={'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
__UpperCamelCase =os.path.join(args.output_dir , 'eval_results.txt' )
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE__ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 62
| 0
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve a maze of 0 (free) and 1 (blocked) cells from (0, 0) to the
    bottom-right corner, printing the path matrix if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search helper: try to extend the path through cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
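# Illustrative run (0 = free cell, 1 = blocked cell):
# maze = [
#     [0, 1, 0],
#     [0, 0, 0],
#     [1, 0, 0],
# ]
# solve_maze(maze)  # prints the 0/1 path matrix and returns True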
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
        UpperCAmelCase_ = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
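# Illustrative end-to-end sketch (not part of the test suite above): translating
# with the same public checkpoint. `forced_bos_token_id` plays exactly the role
# checked by the last test, forcing the first generated token to be the
# target-language code.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
encoded = tokenizer("A test", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("ar"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))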
| 7
| 1
|
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_UpperCamelCase : Optional[Any] = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_UpperCamelCase : Dict = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_UpperCamelCase : Optional[int] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def _UpperCAmelCase ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def _UpperCAmelCase ( self , a , a , a=4 , a=False ) -> Optional[int]:
lowercase__ : Any = compute_bleu(
reference_corpus=a , translation_corpus=a , max_order=a , smooth=a )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 77
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( _lowercase):
def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : str=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=None , ) -> Tuple:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase = self.get_config()
_UpperCamelCase = 300
return config
def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ) -> List[str]:
_UpperCamelCase = DebertaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ) -> Tuple:
_UpperCamelCase = DebertaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[Any]:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Dict:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) -> List[Any]:
_UpperCamelCase = DebertaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
        _UpperCamelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = _UpperCamelCase
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase = DebertaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _UpperCamelCase ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ) -> List[str]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCamelCase )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCamelCase )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Tuple:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DebertaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
pass
@slow
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
_UpperCamelCase = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
# compare the actual values for a slice.
_UpperCamelCase = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
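# Illustrative sketch (outside the tests above): the same public checkpoint
# with a tokenizer producing the inputs instead of hand-written id tensors.
from transformers import AutoTokenizer, DebertaModel
import torch

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")
inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs)[0]  # shape (batch, seq_len, hidden_size)
print(last_hidden.shape)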
| 256
| 0
|
'''simple docstring'''
class _lowerCAmelCase :
    def __init__(self , array ):
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum (self , start , end ):
        # range sum over array[start:end + 1] in O(1) via the precomputed prefix sums
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum (self , target_sum ):
        # hash-set sweep over prefix sums: a contiguous subarray summing to
        # target_sum exists iff some prefix minus an earlier prefix equals it
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
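    # Quick demonstration of the class above (illustrative values):
    ps = _lowerCAmelCase([1, 2, 3, 4] )
    assert ps.get_sum(1 , 3 ) == 9  # 2 + 3 + 4
    assert ps.contains_sum(5 )      # 2 + 3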
| 135
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'bart'
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['past_key_values']
__SCREAMING_SNAKE_CASE : Dict = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , lowercase=50265 , lowercase=1024 , lowercase=12 , lowercase=4096 , lowercase=16 , lowercase=12 , lowercase=4096 , lowercase=16 , lowercase=0.0 , lowercase=0.0 , lowercase="gelu" , lowercase=1024 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=0.0 , lowercase=False , lowercase=True , lowercase=3 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase=True , lowercase=2 , lowercase=2 , **lowercase , ):
A_ : Optional[int] = vocab_size
A_ : Dict = max_position_embeddings
A_ : Dict = d_model
A_ : Any = encoder_ffn_dim
A_ : Dict = encoder_layers
A_ : Optional[int] = encoder_attention_heads
A_ : Tuple = decoder_ffn_dim
A_ : List[str] = decoder_layers
A_ : int = decoder_attention_heads
A_ : Dict = dropout
A_ : List[str] = attention_dropout
A_ : int = activation_dropout
A_ : Dict = activation_function
A_ : List[Any] = init_std
A_ : Dict = encoder_layerdrop
A_ : Tuple = decoder_layerdrop
A_ : Optional[int] = classifier_dropout
A_ : Union[str, Any] = use_cache
A_ : Dict = encoder_layers
A_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , lowercase ):
A_ : List[str] = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"""The config can simply be saved and uploaded again to be fixed.""" )
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
def _a (self ):
if self.task in ["default", "seq2seq-lm"]:
A_ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
A_ : Union[str, Any] = {0: """batch"""}
A_ : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
A_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
A_ : List[str] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowercase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A_ : List[Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
A_, A_ : Optional[int] = self.num_layers
for i in range(lowercase ):
A_ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
A_ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
A_ : Union[str, Any] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def _a (self ):
if self.task in ["default", "seq2seq-lm"]:
A_ : int = super().outputs
else:
A_ : int = super(lowercase , self ).outputs
if self.use_past:
A_, A_ : Union[str, Any] = self.num_layers
for i in range(lowercase ):
A_ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
A_ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Generate decoder inputs
A_ : int = seq_length if not self.use_past else 1
A_ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase , lowercase , lowercase , lowercase , lowercase )
A_ : Optional[int] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
A_ : Dict = dict(**lowercase , **lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_, A_ : Union[str, Any] = common_inputs["""input_ids"""].shape
A_ : Optional[Any] = common_inputs["""decoder_input_ids"""].shape[1]
A_, A_ : str = self.num_attention_heads
A_ : Tuple = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : Tuple = decoder_seq_length + 3
A_ : Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A_ : List[str] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(lowercase , lowercase )] , dim=1 )
A_ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A_, A_ : Optional[int] = self.num_layers
A_ : List[Any] = min(lowercase , lowercase )
A_ : Tuple = max(lowercase , lowercase ) - min_num_layers
A_ : List[str] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase ),
torch.zeros(lowercase ),
torch.zeros(lowercase ),
torch.zeros(lowercase ),
) )
# TODO: test this.
A_ : List[Any] = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(lowercase , lowercase ):
common_inputs["past_key_values"].append((torch.zeros(lowercase ), torch.zeros(lowercase )) )
return common_inputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
A_ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase , lowercase , lowercase , lowercase , lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A_, A_ : List[str] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A_ : Optional[Any] = seqlen + 2
A_, A_ : str = self.num_layers
A_, A_ : Optional[int] = self.num_attention_heads
A_ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A_ : Union[str, Any] = common_inputs["""attention_mask"""].dtype
A_ : int = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 )
A_ : int = [
(torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(lowercase )
]
return common_inputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A_ : List[Any] = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A_ : Union[str, Any] = tokenizer.num_special_tokens_to_add(lowercase )
A_ : List[Any] = compute_effective_axis_dimension(
lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase )
# Generate dummy inputs according to compute batch and sequence
A_ : str = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
A_ : Tuple = dict(tokenizer(lowercase , return_tensors=lowercase ) )
return common_inputs
def _a (self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ):
if self.task in ["default", "seq2seq-lm"]:
A_ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
elif self.task == "causal-lm":
A_ : List[str] = self._generate_dummy_inputs_for_causal_lm(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
else:
A_ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
return common_inputs
def _a (self , lowercase , lowercase , lowercase , lowercase ):
if self.task in ["default", "seq2seq-lm"]:
A_ : Optional[Any] = super()._flatten_past_key_values_(lowercase , lowercase , lowercase , lowercase )
else:
A_ : List[Any] = super(lowercase , self )._flatten_past_key_values_(
lowercase , lowercase , lowercase , lowercase )
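# Usage sketch for the ONNX config above. The method names in the obfuscated
# snippet collide, so this follows the original transformers names
# (BartOnnxConfig, generate_dummy_inputs); treat it as a sketch of intent
# under those assumptions rather than code against the snippet's identifiers.
from transformers import AutoTokenizer, BartConfig
from transformers.models.bart.configuration_bart import BartOnnxConfig
from transformers.utils import TensorType

onnx_config = BartOnnxConfig(BartConfig() , task="""seq2seq-lm""" )
tokenizer = AutoTokenizer.from_pretrained("""facebook/bart-base""" )
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer , framework=TensorType.PYTORCH )
print(sorted(dummy_inputs ) )  # encoder/decoder input ids and attention masks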
| 135
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "sew"
def __init__( self, lowerCAmelCase__=32, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__=2, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__="group", lowerCAmelCase__="gelu", lowerCAmelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), lowerCAmelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowerCAmelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowerCAmelCase__=False, lowerCAmelCase__=128, lowerCAmelCase__=16, lowerCAmelCase__=True, lowerCAmelCase__=0.05, lowerCAmelCase__=10, lowerCAmelCase__=2, lowerCAmelCase__=0.0, lowerCAmelCase__=10, lowerCAmelCase__=0, lowerCAmelCase__="mean", lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=256, lowerCAmelCase__=0, lowerCAmelCase__=1, lowerCAmelCase__=2, **lowerCAmelCase__, ) -> str:
super().__init__(**lowerCAmelCase__, pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__)
snake_case_ = hidden_size
snake_case_ = feat_extract_norm
snake_case_ = feat_extract_activation
snake_case_ = list(lowerCAmelCase__)
snake_case_ = list(lowerCAmelCase__)
snake_case_ = list(lowerCAmelCase__)
snake_case_ = conv_bias
snake_case_ = num_conv_pos_embeddings
snake_case_ = num_conv_pos_embedding_groups
snake_case_ = len(self.conv_dim)
snake_case_ = num_hidden_layers
snake_case_ = intermediate_size
snake_case_ = squeeze_factor
snake_case_ = hidden_act
snake_case_ = num_attention_heads
snake_case_ = hidden_dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = feat_proj_dropout
snake_case_ = final_dropout
snake_case_ = layerdrop
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
snake_case_ = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
                f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)'
                f' = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ = apply_spec_augment
snake_case_ = mask_time_prob
snake_case_ = mask_time_length
snake_case_ = mask_time_min_masks
snake_case_ = mask_feature_prob
snake_case_ = mask_feature_length
snake_case_ = mask_feature_min_masks
# ctc loss
snake_case_ = ctc_loss_reduction
snake_case_ = ctc_zero_infinity
# sequence classification
snake_case_ = use_weighted_layer_sum
snake_case_ = classifier_proj_size
@property
def a_ ( self) -> Optional[Any]:
return functools.reduce(operator.mul, self.conv_stride, 1)
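# Worked example for the property above: with the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the reduce multiplies out to
# 5 * 2**6 = 320, i.e. one feature frame per 320 raw samples, or 20 ms
# of 16 kHz audio.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320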
| 69
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCamelCase ( snake_case_ ):
def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None:
        warnings.warn(
            """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use CLIPImageProcessor instead.""" , FutureWarning , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
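# Migration sketch (illustrative): the deprecated shim above only forwards to
# CLIPImageProcessor, so new code can construct the processor directly.
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")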
| 294
| 0
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : int ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, nicht wahr?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase_ : Optional[int] = {
"""wmt16-en-de-dist-12-1""": [28.3, 27.52],
"""wmt16-en-de-dist-6-1""": [27.4, 27.11],
"""wmt16-en-de-12-1""": [26.9, 25.75],
}
lowerCAmelCase_ : Optional[int] = f'{src_lang}-{tgt_lang}'
lowerCAmelCase_ : Optional[Any] = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n'
model_card_dir.mkdir(parents=lowercase__ , exist_ok=lowercase__ )
lowerCAmelCase_ : List[str] = os.path.join(lowercase__ , """README.md""" )
print(f'Generating {path}' )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase__ )
# make sure we are under the root of the project
__UpperCAmelCase = Path(__file__).resolve().parent.parent.parent
__UpperCAmelCase = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
__UpperCAmelCase = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
| 366
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
__UpperCAmelCase = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __a ( __UpperCamelCase ):
__snake_case : List[Any] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
__snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : str = ElectraTokenizer
def __init__( self : List[Any] , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Any="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : Optional[Any]="[MASK]" , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Optional[Any] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[Any] = getattr(UpperCAmelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : Tuple = strip_accents
lowerCAmelCase_ : Union[str, Any] = tokenize_chinese_chars
lowerCAmelCase_ : int = normalizer_class(**UpperCAmelCase )
lowerCAmelCase_ : str = do_lower_case
    def build_inputs_with_special_tokens ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
        output = [self.cls_token_id] + UpperCAmelCase + [self.sep_token_id]
        if UpperCAmelCase_:
            output += UpperCAmelCase_ + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if UpperCAmelCase_ is None:
            return len(cls + UpperCAmelCase + sep ) * [0]
        return len(cls + UpperCAmelCase + sep ) * [0] + len(UpperCAmelCase_ + sep ) * [1]

    def save_vocabulary ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase_ : Optional[str] = None ):
        files = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase_ )
        return tuple(files )
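# Illustrative round trip with the fast tokenizer above, using its public
# transformers name (ElectraTokenizerFast) and one of the checkpoints
# already listed in this file:
from transformers import ElectraTokenizerFast

tok = ElectraTokenizerFast.from_pretrained("""google/electra-small-discriminator""" )
ids = tok("""A test""" ).input_ids  # [CLS] ... [SEP], token_type_ids all 0
print(tok.convert_ids_to_tokens(ids ) )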
| 28
| 0
|
def is_ip_va_address_valid( ip_va_address ):
    '''simple docstring'''
    octets = [int(i ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip ) else 'invalid'
    print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 88
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = 42
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
__magic_name__ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__magic_name__ = x
__magic_name__ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(UpperCamelCase__ )
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ )
# post-process
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(UpperCamelCase__ )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(UpperCamelCase__ )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(UpperCamelCase__ )
if shape is not None:
__magic_name__ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
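# --- Editor's illustrative sketch, not part of the original file ---
# The "preserve gradients" line above, `z + (z_q - z).detach()`, is the
# straight-through estimator: the forward value equals z_q, while the backward
# pass routes gradients to z as if the quantization step were the identity.
# Minimal self-contained check (assumes `torch` is imported at the top of this
# file, as the classes above require):
if __name__ == "__main__":
    _z = torch.randn(4, requires_grad=True)
    _z_q = torch.round(_z)               # stand-in for the codebook lookup
    _out = _z + (_z_q - _z).detach()     # forward value equals _z_q
    _out.sum().backward()
    assert torch.allclose(_out, _z_q)
    assert torch.equal(_z.grad, torch.ones_like(_z))  # identity gradient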
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = parameters
__magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
__magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
__magic_name__ = deterministic
__magic_name__ = torch.exp(0.5 * self.logvar )
__magic_name__ = torch.exp(self.logvar )
if self.deterministic:
__magic_name__ = __magic_name__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
__magic_name__ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__magic_name__ = self.mean + self.std * sample
return x
def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
__magic_name__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.mean
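# --- Editor's illustrative cross-check, not part of the original file ---
# The `kl` method above implements the closed form
#     KL(N(mu, var) || N(0, 1)) = 0.5 * (mu**2 + var - 1 - log(var))
# summed over the latent dimensions. A one-dimensional numeric example:
if __name__ == "__main__":
    import math

    _mu, _var = 0.5, 2.0
    _kl = 0.5 * (_mu**2 + _var - 1.0 - math.log(_var))
    print(f"KL(N({_mu}, {_var}) || N(0, 1)) = {_kl:.6f}")  # ~0.278426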
| 88
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276
|
def __lowerCamelCase ( discount_rate: float , cash_flows: list[float] ) -> float:
    """Return the net present value of `cash_flows` at the given `discount_rate`."""
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""" )
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""" )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
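# Editor's illustrative usage (hypothetical figures, not part of the original
# file): a 10% discount rate with an initial outlay of -100 followed by three
# annual inflows of 50 gives
#     -100/1.1**0 + 50/1.1**1 + 50/1.1**2 + 50/1.1**3 ~= 24.34
# i.e. __lowerCamelCase(0.10, [-100.0, 50.0, 50.0, 50.0]) returns 24.34.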
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = MgpstrTokenizer
__UpperCamelCase = False
__UpperCamelCase = {}
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowerCamelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCamelCase = dict(zip(_a , range(len(_a ) ) ) )
lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = """tester"""
lowerCamelCase = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCamelCase = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
lowerCamelCase = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
lowerCamelCase = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCamelCase , lowerCamelCase = self.get_input_output_texts(_a )
lowerCamelCase = tokenizer.tokenize(_a )
lowerCamelCase = tokenizer.convert_tokens_to_ids(_a )
lowerCamelCase = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
lowerCamelCase = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
lowerCamelCase = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(""" """ , """""" ) , _a )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
| 291
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
lowerCAmelCase : Any = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
lowerCAmelCase : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
lowerCAmelCase : Any = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
def _lowerCAmelCase ( self , _a , _a , _a=None , _a=1 , _a="binary" , _a=None , _a="warn" , ):
"""simple docstring"""
lowerCamelCase = recall_score(
_a , _a , labels=_a , pos_label=_a , average=_a , sample_weight=_a , zero_division=_a , )
return {"recall": float(_a ) if score.size == 1 else score}
| 291
| 1
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = None
if token is not None:
lowerCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
lowerCAmelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
lowerCAmelCase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
lowerCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCAmelCase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = requests.get(url + f'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str]=None ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = None
if token is not None:
lowerCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
lowerCAmelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
lowerCAmelCase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
lowerCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowerCAmelCase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase = requests.get(url + f'&page={i + 2}' , headers=_SCREAMING_SNAKE_CASE ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
lowerCAmelCase = None
if token is not None:
lowerCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'}
lowerCAmelCase = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = result.headers["""Location"""]
lowerCAmelCase = requests.get(_SCREAMING_SNAKE_CASE , allow_redirects=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , f'{artifact_name}.zip' )
with open(_SCREAMING_SNAKE_CASE , """wb""" ) as fp:
fp.write(response.content )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ) -> Any:
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = None
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_SCREAMING_SNAKE_CASE ) as f:
for line in f:
lowerCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCAmelCase = line[: line.index(""": """ )]
lowerCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowerCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_SCREAMING_SNAKE_CASE )
elif filename == "job_name.txt":
lowerCAmelCase = line
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f'`errors` and `failed_tests` should have the same number of elements. Got {len(_SCREAMING_SNAKE_CASE )} for `errors` '
f'and {len(_SCREAMING_SNAKE_CASE )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
""" problem.""" )
lowerCAmelCase = None
if job_name and job_links:
lowerCAmelCase = job_links.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# A list with elements of the form (line of error, error, failed test)
lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )]
return result
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = []
lowerCAmelCase = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_SCREAMING_SNAKE_CASE , job_links=_SCREAMING_SNAKE_CASE ) )
return errors
def _snake_case ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int=None ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase = Counter()
counter.update([x[1] for x in logs] )
lowerCAmelCase = counter.most_common()
lowerCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCAmelCase = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def _snake_case ( _SCREAMING_SNAKE_CASE : str ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase = test.split("""/""" )[2]
else:
lowerCAmelCase = None
return test
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any=None ) -> Any:
"""simple docstring"""
lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase = [x for x in logs if x[2] is not None]
lowerCAmelCase = {x[2] for x in logs}
lowerCAmelCase = {}
for test in tests:
lowerCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase = counter.most_common()
lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase = dict(sorted(r.items() , key=lambda _SCREAMING_SNAKE_CASE : item[1]["count"] , reverse=_SCREAMING_SNAKE_CASE ) )
return r
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCAmelCase = """| no. | error | status |"""
lowerCAmelCase = """|-:|:-|:-|"""
lowerCAmelCase = [header, sep]
for error in reduced_by_error:
lowerCAmelCase = reduced_by_error[error]["""count"""]
lowerCAmelCase = f'| {count} | {error[:100]} | |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase = """| model | no. of errors | major error | count |"""
lowerCAmelCase = """|-:|-:|-:|-:|"""
lowerCAmelCase = [header, sep]
for model in reduced_by_model:
lowerCAmelCase = reduced_by_model[model]["""count"""]
lowerCAmelCase, lowerCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase = f'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(_SCREAMING_SNAKE_CASE )
return "\n".join(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
UpperCAmelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
UpperCAmelCase = get_job_links(args.workflow_run_id, token=args.token)
UpperCAmelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
UpperCAmelCase = k.find(' / ')
UpperCAmelCase = k[index + len(' / ') :]
UpperCAmelCase = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
UpperCAmelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
UpperCAmelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
UpperCAmelCase = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
UpperCAmelCase = reduce_by_error(errors)
UpperCAmelCase = reduce_by_model(errors)
UpperCAmelCase = make_github_table(reduced_by_error)
UpperCAmelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 187
|
'''simple docstring'''
from __future__ import annotations
def is_palindrome ( _SCREAMING_SNAKE_CASE : int | str ) -> bool:
    """Return True if the number (or string) reads the same forwards and backwards."""
    n = str(_SCREAMING_SNAKE_CASE )
    return n == n[::-1]
def solution ( _SCREAMING_SNAKE_CASE : int = 1_000_000 ) -> int:
    """Sum all numbers below the limit that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1 , _SCREAMING_SNAKE_CASE ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("""b""" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
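# Editor's spot check (not part of the original file): 585 qualifies, since
# str(585) == "585" and bin(585) == "0b1001001001" are both palindromes, so
# is_palindrome(585) and is_palindrome(bin(585).split("b")[1]) are both True.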
| 187
| 1
|
from math import isqrt, log2
def calculate_prime_numbers ( max_number: int ) -> list[int]:
    '''Sieve of Eratosthenes: return all primes below max_number.'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution ( base: int = 800800 , degree: int = 800800 ) -> int:
    '''Count hybrid integers p**q * q**p (primes p < q) not exceeding base**degree.'''
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 252
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : List[Any] = ["input_features"]
def __init__( self , A=80 , A=1_60_00 , A=1_60 , A=30 , A=4_00 , A=0.0 , A=False , **A , ) -> Dict:
'''simple docstring'''
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
lowerCamelCase = n_fft
lowerCamelCase = hop_length
lowerCamelCase = chunk_length
lowerCamelCase = chunk_length * sampling_rate
lowerCamelCase = self.n_samples // hop_length
lowerCamelCase = sampling_rate
lowerCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=A , norm="""slaney""" , mel_scale="""slaney""" , )
def __A ( self , A ) -> np.ndarray:
'''simple docstring'''
lowerCamelCase = spectrogram(
A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
lowerCamelCase = log_spec[:, :-1]
lowerCamelCase = np.maximum(A , log_spec.max() - 8.0 )
lowerCamelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __A ( A , A , A = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowerCamelCase = np.array(A , np.intaa )
lowerCamelCase = []
for vector, length in zip(A , attention_mask.sum(-1 ) ):
lowerCamelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCamelCase = padding_value
normed_input_values.append(A )
else:
lowerCamelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , A , A = True , A = None , A = None , A = None , A = "max_length" , A = None , A = None , A = None , **A , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCamelCase = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
lowerCamelCase = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
lowerCamelCase = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase = [np.asarray([raw_speech] ).T]
lowerCamelCase = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
lowerCamelCase = self.pad(
A , padding=A , max_length=max_length if max_length else self.n_samples , truncation=A , pad_to_multiple_of=A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCamelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
lowerCamelCase = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
lowerCamelCase = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
lowerCamelCase = [self._np_extract_fbank_features(A ) for waveform in input_features[0]]
if isinstance(input_features[0] , A ):
lowerCamelCase = [np.asarray(A , dtype=np.floataa ) for feature in input_features]
else:
lowerCamelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCamelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
lowerCamelCase = padded_inputs.convert_to_tensors(A )
return padded_inputs
def __A ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowerCamelCase = copy.deepcopy(self.__dict__ )
lowerCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
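# Editor's illustrative sketch of the dynamic-range compression used in
# `_np_extract_fbank_features` above (a toy 1-D array stands in for a real
# log-mel spectrogram; not part of the original file; `np` is imported at the
# top of this module):
if __name__ == "__main__":
    _log_spec = np.array([-20.0, -6.0, -2.0, 0.0])
    _log_spec = np.maximum(_log_spec, _log_spec.max() - 8.0)  # floor at max - 8
    _log_spec = (_log_spec + 4.0) / 4.0                       # map into ~[-1, 1]
    print(_log_spec)  # [-1.  -0.5  0.5  1. ]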
| 252
| 1
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase ( lowerCAmelCase):
def __init__( self: List[str] , _lowerCAmelCase: str , _lowerCAmelCase: List[str]=13 , _lowerCAmelCase: Tuple=7 , _lowerCAmelCase: List[str]=True , _lowerCAmelCase: List[Any]=True , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: int=True , _lowerCAmelCase: List[str]=99 , _lowerCAmelCase: List[str]=32 , _lowerCAmelCase: Dict=5 , _lowerCAmelCase: str=4 , _lowerCAmelCase: List[Any]=37 , _lowerCAmelCase: Tuple="gelu" , _lowerCAmelCase: int=0.1 , _lowerCAmelCase: Any=0.1 , _lowerCAmelCase: Tuple=5_12 , _lowerCAmelCase: Tuple=16 , _lowerCAmelCase: Optional[Any]=2 , _lowerCAmelCase: List[str]=0.02 , _lowerCAmelCase: List[str]=False , _lowerCAmelCase: Optional[int]=True , _lowerCAmelCase: int="None" , _lowerCAmelCase: List[Any]=3 , _lowerCAmelCase: Tuple=4 , _lowerCAmelCase: int=None , ):
lowercase :int = parent
lowercase :Dict = batch_size
lowercase :Optional[Any] = seq_length
lowercase :int = is_training
lowercase :int = use_input_mask
lowercase :int = use_token_type_ids
lowercase :str = use_labels
lowercase :Tuple = vocab_size
lowercase :int = hidden_size
lowercase :Any = num_hidden_layers
lowercase :Any = num_attention_heads
lowercase :Dict = intermediate_size
lowercase :Union[str, Any] = hidden_act
lowercase :Optional[Any] = hidden_dropout_prob
lowercase :Tuple = attention_probs_dropout_prob
lowercase :Optional[Any] = max_position_embeddings
lowercase :int = type_vocab_size
lowercase :Tuple = type_sequence_label_size
lowercase :Optional[int] = initializer_range
lowercase :Optional[Any] = num_labels
lowercase :int = num_choices
lowercase :List[str] = relative_attention
lowercase :Tuple = position_biased_input
lowercase :Tuple = pos_att_type
lowercase :Dict = scope
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase :int = None
if self.use_input_mask:
lowercase :Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase :Optional[int] = None
if self.use_token_type_ids:
lowercase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase :List[Any] = None
lowercase :Any = None
lowercase :Optional[int] = None
if self.use_labels:
lowercase :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase :str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase :Any = ids_tensor([self.batch_size] , self.num_choices )
lowercase :Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: Dict ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: str , _lowerCAmelCase: str , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: str , _lowerCAmelCase: int , _lowerCAmelCase: List[str] , _lowerCAmelCase: Dict ):
lowercase :Tuple = DebertaVaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :List[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )[0]
lowercase :Dict = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )[0]
lowercase :List[str] = model(_lowerCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: Dict , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] ):
lowercase :int = DebertaVaForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: str ):
lowercase :Any = self.num_labels
lowercase :Optional[Any] = DebertaVaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[Any] , _lowerCAmelCase: Any , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Union[str, Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] ):
lowercase :List[str] = self.num_labels
lowercase :int = DebertaVaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :Union[str, Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: List[str] ):
lowercase :Optional[Any] = DebertaVaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :List[Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: str , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict , _lowerCAmelCase: int , _lowerCAmelCase: List[Any] ):
lowercase :Tuple = DebertaVaForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase :Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase :List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase :Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase :Optional[Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
_a = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_a = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :List[Any] = DebertaVaModelTester(self )
lowercase :List[Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
lowercase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
lowercase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self: str ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase :Dict = DebertaVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase):
@unittest.skip(reason="Model not available yet" )
def SCREAMING_SNAKE_CASE ( self: Optional[int] ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
lowercase :Dict = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
lowercase :Dict = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
lowercase :Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase :Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
# compare the actual values for a slice.
lowercase :Any = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 158
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 158
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase :str = logging.get_logger(__name__)
lowerCAmelCase :Optional[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase :List[str] = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
lowerCAmelCase :List[str] = {'''facebook/blenderbot-3B''': 1_2_8}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = VOCAB_FILES_NAMES
A_ : int = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[int] = ["""input_ids""", """attention_mask"""]
A_ : Union[str, Any] = BlenderbotTokenizer
def __init__( self : Optional[Any] , _A : Dict=None , _A : str=None , _A : str=None , _A : Optional[int]="replace" , _A : str="<s>" , _A : int="</s>" , _A : List[Any]="</s>" , _A : List[Any]="<s>" , _A : Tuple="<unk>" , _A : str="<pad>" , _A : Dict="<mask>" , _A : Optional[Any]=False , _A : str=True , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
__magic_name__ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _A ) != add_prefix_space:
__magic_name__ : Tuple = getattr(_A , pre_tok_state.pop('type' ) )
__magic_name__ : Optional[int] = add_prefix_space
__magic_name__ : str = pre_tok_class(**_A )
__magic_name__ : Tuple = add_prefix_space
__magic_name__ : int = 'post_processor'
__magic_name__ : str = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
__magic_name__ : List[str] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__magic_name__ : Union[str, Any] = tuple(state['sep'] )
if "cls" in state:
__magic_name__ : Tuple = tuple(state['cls'] )
__magic_name__ : str = False
if state.get('add_prefix_space' , _A ) != add_prefix_space:
__magic_name__ : Optional[int] = add_prefix_space
__magic_name__ : Union[str, Any] = True
if state.get('trim_offsets' , _A ) != trim_offsets:
__magic_name__ : List[str] = trim_offsets
__magic_name__ : Optional[int] = True
if changes_to_apply:
__magic_name__ : List[str] = getattr(_A , state.pop('type' ) )
__magic_name__ : str = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCAmelCase ( self : List[str] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self : Optional[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
__magic_name__ : Dict = value
def __lowerCAmelCase ( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> BatchEncoding:
__magic_name__ : Any = kwargs.get('is_split_into_words' , _A )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_A , **_A )
def __lowerCAmelCase ( self : Optional[int] , *_A : Tuple , **_A : Union[str, Any] ) -> BatchEncoding:
__magic_name__ : Any = kwargs.get('is_split_into_words' , _A )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_A , **_A )
def __lowerCAmelCase ( self : Any , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
__magic_name__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def __lowerCAmelCase ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
__magic_name__ : Optional[Any] = [self.sep_token_id]
__magic_name__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ) -> Tuple:
return token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self : List[str] , _A : "Conversation" ) -> List[int]:
__magic_name__ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix a space, as is done within Blenderbot itself
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(_A )
__magic_name__ : int = ' '.join(_A )
__magic_name__ : Any = self.encode(_A )
if len(_A ) > self.model_max_length:
__magic_name__ : Any = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
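# Editor's note (not part of the original file): the slice above,
# `input_ids[-self.model_max_length :]`, truncates from the LEFT, so the most
# recent conversation turns are the ones that survive. E.g. with
# model_max_length = 3, [1, 2, 3, 4, 5] becomes [3, 4, 5].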
| 331
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase :int = '''pt'''
elif is_tf_available():
lowerCAmelCase :Optional[Any] = '''tf'''
else:
lowerCAmelCase :Optional[Any] = '''jax'''
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = ByTaTokenizer
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
super().setUp()
__magic_name__ : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__ : Optional[Any] = []
for i in range(len(_A ) ):
try:
__magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) )
__magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__magic_name__ : Optional[int] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__magic_name__ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__ : List[str] = [t[0] for t in toks]
# Ensure consistency
__magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__magic_name__ : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__magic_name__ : Union[str, Any] = ' ' + output_txt
__magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Any = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : Optional[int] = self.ta_base_tokenizer
__magic_name__ : Optional[int] = 'Unicode €.'
__magic_name__ : Optional[Any] = tokenizer(_A )
__magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : Any = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__magic_name__ : Any = tokenizer('e è é ê ë' )
__magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : List[str] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : List[Any] = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__magic_name__ : str = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = self.ta_base_tokenizer
__magic_name__ : Tuple = [
'Summary of the text.',
'Another summary.',
]
        targets = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : Any = ['A long paragraph for summarization. </s>']
__magic_name__ : List[str] = ['Summary of the text. </s>']
# fmt: off
__magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
        batch = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
        tokenizers = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [F'<extra_id_{i}>' for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=False )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                tokenizer = tokenizer_class.from_pretrained(tmp_dir )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
                attributes_list = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters , skip_special_tokens=False )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {}
class A( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''llama'''
    keys_to_ignore_at_inference = ['''past_key_values''']
def __init__( self : Dict , A_ : List[Any]=32000 , A_ : Tuple=4096 , A_ : List[Any]=11008 , A_ : List[str]=32 , A_ : Optional[Any]=32 , A_ : int=None , A_ : Any="silu" , A_ : Union[str, Any]=2048 , A_ : List[str]=0.02 , A_ : Optional[int]=1E-6 , A_ : List[str]=True , A_ : Optional[int]=0 , A_ : Optional[Any]=1 , A_ : Optional[int]=2 , A_ : int=1 , A_ : Tuple=False , A_ : Tuple=None , **A_ : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = hidden_size
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = num_key_value_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = initializer_range
lowerCamelCase_ = rms_norm_eps
lowerCamelCase_ = pretraining_tp
lowerCamelCase_ = use_cache
lowerCamelCase_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , tie_word_embeddings=A_ , **A_ , )
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
f"""got {self.rope_scaling}""" )
lowerCamelCase_ = self.rope_scaling.get('type' , A_ )
lowerCamelCase_ = self.rope_scaling.get('factor' , A_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(A_ , A_ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
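# A minimal consumer-side sketch (assumes an installed `diffusers` package;
# mirroring the guard above keeps the import safe when torch is absent):
from diffusers.utils import is_torch_available
if is_torch_available():
    from diffusers.models import AutoencoderKL, VQModel  # torch-backed models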
# Imports
import numpy as np
class UpperCamelCase__ :
'''simple docstring'''
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arvaa( self ):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self , x=0.08 , a=1.22 , b=0.03 ):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        return self.nir - self.green
    def evi( self ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self , y=0.16 ):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        return self.nir / self.red
    def mrvi( self ):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        return self.nir / self.red
    def tvi( self ):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
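# Usage sketch for the index formulas above (band values are illustrative);
# this restates the ndvi() computation directly with numpy:
red_band = np.array([[50.0, 60.0], [70.0, 80.0]])
nir_band = np.array([[100.0, 90.0], [140.0, 120.0]])
ndvi_map = (nir_band - red_band) / (nir_band + red_band)  # values fall in [-1, 1]
assert ndvi_map.max() <= 1.0 and ndvi_map.min() >= -1.0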
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class BlipaVisionConfig (PretrainedConfig ):
'''simple docstring'''
    model_type = """blip_2_vision_model"""
def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Dict = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Union[str, Any] = attention_dropout
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : str = qkv_bias
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class BlipaQFormerConfig (PretrainedConfig ):
'''simple docstring'''
    model_type = """blip_2_qformer"""
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int:
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : int = hidden_act
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : List[str] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = position_embedding_type
lowerCamelCase : Tuple = cross_attention_frequency
lowerCamelCase : Optional[int] = encoder_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class BlipaConfig (PretrainedConfig ):
'''simple docstring'''
    model_type = """blip-2"""
    is_composition = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str:
super().__init__(**UpperCamelCase__ )
if vision_config is None:
lowerCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
lowerCamelCase : List[Any] = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
lowerCamelCase : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ )
lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ )
lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings
lowerCamelCase : int = self.text_config.is_encoder_decoder
lowerCamelCase : Optional[Any] = num_query_tokens
lowerCamelCase : int = self.vision_config.hidden_size
lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase : Dict = 1.0
lowerCamelCase : List[Any] = 0.02
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
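    # Composition sketch (class names as fixed in this file; values are
    # illustrative; the text backbone defaults to OPT when text_config is None):
    #   vision = BlipaVisionConfig(image_size=224, patch_size=14)
    #   qformer = BlipaQFormerConfig(num_hidden_layers=12)
    #   config = BlipaConfig(vision_config=vision.to_dict(), qformer_config=qformer.to_dict())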
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester :
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : str , ):
"""simple docstring"""
_A: Union[str, Any] = parent
_A: Any = 1_3
_A: Union[str, Any] = 7
_A: Union[str, Any] = True
_A: Any = True
_A: Union[str, Any] = False
_A: Union[str, Any] = True
_A: Union[str, Any] = 9_9
_A: Optional[int] = 3_2
_A: List[str] = 2
_A: int = 4
_A: str = 3_7
_A: List[Any] = "gelu"
_A: List[str] = 0.1
_A: int = 0.1
_A: Any = 5_1_2
_A: str = 1_6
_A: Any = 2
_A: List[str] = 0.02
_A: List[Any] = 3
_A: List[str] = 4
_A: Optional[int] = None
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A: Union[str, Any] = None
if self.use_input_mask:
_A: Any = random_attention_mask([self.batch_size, self.seq_length] )
_A: List[Any] = None
_A: Tuple = None
_A: Union[str, Any] = None
if self.use_labels:
_A: Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A: Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
_A: List[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: List[Any] = TFDistilBertModel(config=lowerCAmelCase__ )
_A: Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
_A: Union[str, Any] = model(lowerCAmelCase__ )
_A: str = [input_ids, input_mask]
_A: Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ):
"""simple docstring"""
_A: Dict = TFDistilBertForMaskedLM(config=lowerCAmelCase__ )
_A: List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
_A: Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
"""simple docstring"""
_A: Tuple = TFDistilBertForQuestionAnswering(config=lowerCAmelCase__ )
_A: Any = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
_A: int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ):
"""simple docstring"""
_A: int = self.num_labels
_A: List[str] = TFDistilBertForSequenceClassification(lowerCAmelCase__ )
_A: str = {"input_ids": input_ids, "attention_mask": input_mask}
_A: Union[str, Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: str = self.num_choices
_A: Union[str, Any] = TFDistilBertForMultipleChoice(lowerCAmelCase__ )
_A: Optional[int] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
_A: List[Any] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
_A: Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
_A: List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] ):
"""simple docstring"""
_A: Optional[Any] = self.num_labels
_A: Any = TFDistilBertForTokenClassification(lowerCAmelCase__ )
_A: Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
_A: List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : int ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCamelCase : Dict = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : Any = False
__UpperCamelCase : Union[str, Any] = False
def __magic_name__ ( self : str ):
"""simple docstring"""
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=3_7 )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowerCAmelCase__ )
def __magic_name__ ( self : str ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCAmelCase__ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCAmelCase__ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCAmelCase__ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCAmelCase__ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
_A: Union[str, Any] = TFDistilBertModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: List[Any] = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_A: Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A: Optional[int] = model(lowerCAmelCase__ )[0]
_A: int = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowerCAmelCase__ )
_A: List[str] = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 )
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SchedulerCommonTest ):
'''simple docstring'''
__UpperCamelCase : Any = (DDPMParallelScheduler,)
def __magic_name__ ( self : Optional[int] , **lowerCAmelCase_ : Any ):
"""simple docstring"""
        config = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase_ )
return config
def __magic_name__ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase_ , beta_end=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase_ )
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCAmelCase_ )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase_ , prediction_type=lowerCAmelCase_ , sample_max_value=lowerCAmelCase_ , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
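        # Where these expected values come from: the DDPM posterior variance is
        # beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t) on the linear beta
        # schedule configured above (1000 steps, beta in [0.0001, 0.02]), e.g.:
        #   betas = np.linspace(0.0001, 0.02, 1000)
        #   alphas_bar = np.cumprod(1.0 - betas)
        #   betas[999] * (1 - alphas_bar[998]) / (1 - alphas_bar[999])  # ~= 0.02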
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Any = self.scheduler_classes[0]
_A: List[str] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: List[Any] = len(lowerCAmelCase_ )
_A: Union[str, Any] = self.dummy_model()
_A: Dict = self.dummy_sample_deter
_A: Dict = self.dummy_sample_deter + 0.1
_A: str = self.dummy_sample_deter - 0.1
_A: str = samplea.shape[0]
_A: Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
_A: List[str] = torch.arange(lowerCAmelCase_ )[0:3, None].repeat(1 , lowerCAmelCase_ )
_A: List[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
_A: Optional[int] = scheduler.batch_step_no_noise(lowerCAmelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
_A: Dict = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: List[str] = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.5005 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[Any] = self.scheduler_classes[0]
_A: List[Any] = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Optional[int] = self.dummy_sample_deter
_A: List[str] = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: Optional[int] = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: List[Any] = pred_prev_sample
_A: Optional[int] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: Any = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
_A: List[str] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = len(lowerCAmelCase_ )
_A: Any = self.dummy_model()
_A: Any = self.dummy_sample_deter
_A: str = torch.manual_seed(0 )
for t in reversed(range(lowerCAmelCase_ ) ):
# 1. predict noise residual
_A: Any = model(lowerCAmelCase_ , lowerCAmelCase_ )
# 2. predict previous mean of sample x_t-1
_A: int = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ).prev_sample
_A: Tuple = pred_prev_sample
_A: List[Any] = torch.sum(torch.abs(lowerCAmelCase_ ) )
_A: str = torch.mean(torch.abs(lowerCAmelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Dict = scheduler_class(**lowerCAmelCase_ )
_A: Any = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
_A: Tuple = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase_ ):
if i == len(lowerCAmelCase_ ) - 1:
_A: Dict = -1
else:
_A: int = timesteps[i + 1]
_A: List[str] = scheduler.previous_timestep(lowerCAmelCase_ )
_A: str = prev_t.item()
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Tuple = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: Any = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCAmelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : int ):
"""simple docstring"""
_A: List[str] = self.scheduler_classes[0]
_A: Optional[Any] = self.get_scheduler_config()
_A: Union[str, Any] = scheduler_class(**lowerCAmelCase_ )
_A: Union[str, Any] = [1_0_0, 8_7, 5_0, 1, 0]
_A: Dict = len(lowerCAmelCase_ )
with self.assertRaises(lowerCAmelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_ )
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: List[Any] = self.scheduler_classes[0]
_A: int = self.get_scheduler_config()
_A: str = scheduler_class(**lowerCAmelCase_ )
_A: Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCAmelCase_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_ )
from __future__ import annotations
def solve_maze( maze : list[list[int]] ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('\n'.join(str(row ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved
def run_maze( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
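# Example (0 = open cell, 1 = blocked); solve_maze prints the 0/1 path matrix
# and returns True when a path from (0, 0) to (n-1, n-1) exists:
#   solve_maze([
#       [0, 1, 0],
#       [0, 0, 0],
#       [1, 0, 0],
#   ])   # -> True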
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fpaa : bool = False ):
    '''simple docstring'''
    dtype = torch.float16 if fpaa else torch.float32
    if fpaa and torch.cuda.is_available():
        device = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
"""simple docstring"""
import random
def random_graph( vertices_number : int , probability : float , directed : bool = False ):
    """simple docstring"""
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number : int ):
    """simple docstring"""
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
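    # Quick usage sketch (output depends on the random seed):
    #   random.seed(0)
    #   print(random_graph(4, 0.5))   # e.g. {0: [1, 3], 1: [0, 2], 2: [1], 3: [0]}
    #   print(complete_graph(3))      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}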
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features( self , features ) -> Dict:
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
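    # Usage sketch (feature values are illustrative): aligning the template with
    # a dataset's features swaps the generic ClassLabel for the dataset's own labels.
    #   feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    #   aligned = TextClassification().align_with_features(feats)
    #   aligned.label_schema["labels"].names   # -> ['neg', 'pos']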