"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCAmelCase__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : List[str] = torchvision.models.resnetaaa(pretrained=__SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = list(model.children() )[:-2]
lowerCAmelCase : List[str] = nn.Sequential(*__SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.pool(self.model(__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : Dict = torch.flatten(__SCREAMING_SNAKE_CASE , start_dim=2 )
lowerCAmelCase : Union[str, Any] = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = [json.loads(__SCREAMING_SNAKE_CASE ) for l in open(__SCREAMING_SNAKE_CASE )]
lowerCAmelCase : int = os.path.dirname(__SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer
lowerCAmelCase : List[Any] = labels
lowerCAmelCase : Optional[int] = len(__SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = max_seq_length
lowerCAmelCase : Union[str, Any] = transforms
def __len__( self ):
"""simple docstring"""
return len(self.data )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=__SCREAMING_SNAKE_CASE ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1]
lowerCAmelCase : List[str] = sentence[: self.max_seq_length]
lowerCAmelCase : List[Any] = torch.zeros(self.n_classes )
lowerCAmelCase : Tuple = 1
lowerCAmelCase : Tuple = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
lowerCAmelCase : List[Any] = self.transforms(__SCREAMING_SNAKE_CASE )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : str = [len(row["sentence"] ) for row in batch]
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase ), max(_lowerCamelCase )
lowerCAmelCase : int = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
lowerCAmelCase : List[Any] = torch.zeros(_lowerCamelCase , _lowerCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_lowerCamelCase , _lowerCamelCase ) ):
lowerCAmelCase : Optional[Any] = input_row["sentence"]
lowerCAmelCase : List[str] = 1
lowerCAmelCase : str = torch.stack([row["image"] for row in batch] )
lowerCAmelCase : str = torch.stack([row["label"] for row in batch] )
lowerCAmelCase : str = torch.stack([row["image_start_token"] for row in batch] )
lowerCAmelCase : str = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def a__ ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def a__ ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
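# A minimal usage sketch (not part of the original file): wiring JsonlDataset and
# collate_fn into a DataLoader. The tokenizer choice and the "dev.jsonl" path are
# hypothetical, for illustration only.
#
#   from torch.utils.data import DataLoader
#   from transformers import BertTokenizer
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset("dev.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=512)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, img, img_start, img_end, tgt = next(iter(loader))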
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_SCREAMING_SNAKE_CASE : Union[str, Any] = '''CompVis/stable-diffusion-v1-1'''
_SCREAMING_SNAKE_CASE : Optional[Any] = '''CompVis/stable-diffusion-v1-2'''
_SCREAMING_SNAKE_CASE : int = '''CompVis/stable-diffusion-v1-3'''
_SCREAMING_SNAKE_CASE : str = '''CompVis/stable-diffusion-v1-4'''
class a ( __snake_case ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : AutoencoderKL , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , __SCREAMING_SNAKE_CASE : CLIPImageProcessor , __SCREAMING_SNAKE_CASE : bool = True , ) -> List[str]:
super()._init_()
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = StableDiffusionPipeline(
vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , requires_safety_checker=__SCREAMING_SNAKE_CASE , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCamelCase ( self : List[str] ) -> Dict[str, Any]:
return {k: getattr(self , __SCREAMING_SNAKE_CASE ) for k in self.config.keys() if not k.startswith('_' )}
def UpperCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any ) -> List[Any]:
self.enable_attention_slicing(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : int , ) -> Tuple:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Optional[int]:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Tuple:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Tuple:
return self.pipea(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
@torch.no_grad()
def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, List[str]] , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 512 , __SCREAMING_SNAKE_CASE : int = 50 , __SCREAMING_SNAKE_CASE : float = 7.5 , __SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __SCREAMING_SNAKE_CASE : int = 1 , **__SCREAMING_SNAKE_CASE : int , ) -> str:
lowerCamelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(__SCREAMING_SNAKE_CASE )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCamelCase_ = self.textaimg_sda_a(
prompt=__SCREAMING_SNAKE_CASE , height=__SCREAMING_SNAKE_CASE , width=__SCREAMING_SNAKE_CASE , num_inference_steps=__SCREAMING_SNAKE_CASE , guidance_scale=__SCREAMING_SNAKE_CASE , negative_prompt=__SCREAMING_SNAKE_CASE , num_images_per_prompt=__SCREAMING_SNAKE_CASE , eta=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , latents=__SCREAMING_SNAKE_CASE , output_type=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , callback=__SCREAMING_SNAKE_CASE , callback_steps=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
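# A hedged usage sketch (not part of the original file): loading this class as a
# diffusers custom pipeline. The custom_pipeline name and the .images attribute are
# assumptions based on the usual community-pipeline conventions.
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   pipe.enable_attention_slicing()
#   images = pipe("a photo of an astronaut riding a horse").images  # one image per checkpoint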
"""Testing suite for the PyTorch ViT Hybrid model."""
import inspect
import unittest

from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
    from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
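# To run this module (assuming the usual transformers repository layout for the path):
#
#   python -m pytest tests/models/vit_hybrid/test_modeling_vit_hybrid.py
#
# The integration tests above are gated behind @slow and need network access to download
# the google/vit-hybrid-base-bit-384 checkpoint.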
"""Sequence feature extraction class: shared padding/truncation logic for speech feature extractors."""
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features,
        max_length=None,
        pad_to_multiple_of=None,
        truncation=None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
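# A hedged usage sketch (not part of the original file): any concrete subclass, for
# example Wav2Vec2FeatureExtractor, inherits `pad` and can use it as a collate_fn.
# The constructor values below are hypothetical.
#
#   import numpy as np
#   from transformers import Wav2Vec2FeatureExtractor
#
#   extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   features = [{"input_values": np.zeros(800)}, {"input_values": np.zeros(1200)}]
#   batch = extractor.pad(features, padding=True, return_attention_mask=True, return_tensors="np")
#   # batch["input_values"].shape == (2, 1200); batch["attention_mask"] marks the real samples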
"""simple docstring"""
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = Rectangle(height=0.5 , width=0.5)
SCREAMING_SNAKE_CASE_ : Dict = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
SCREAMING_SNAKE_CASE_ : Any = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Optional[int] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : List[Any] = VGroup(lowercase_ , lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : List[str] = Text('''CPU''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Optional[Any] = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = [mem.copy() for i in range(1)]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : List[str] = Text('''GPU''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Optional[int] = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
gpu.align_to(lowercase_ , lowercase_)
gpu.set_x(gpu.get_x() - 1)
self.add(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = [mem.copy() for i in range(6)]
SCREAMING_SNAKE_CASE_ : Tuple = VGroup(*lowercase_).arrange(lowercase_ , buff=0)
SCREAMING_SNAKE_CASE_ : Optional[int] = Text('''Model''' , font_size=24)
SCREAMING_SNAKE_CASE_ : Tuple = Group(lowercase_ , lowercase_).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_)
model.move_to([3, -1.0, 0])
self.play(
Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , Create(lowercase_ , run_time=1) , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
SCREAMING_SNAKE_CASE_ : Dict = Square(side_length=2.2)
key.move_to([-5, 2, 0])
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(lowercase_ , run_time=2.5) , Write(lowercase_) , Write(lowercase_))
self.add(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : List[str] = []
for i, rect in enumerate(lowercase_):
SCREAMING_SNAKE_CASE_ : Any = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(lowercase_ , opacity=0.7)
cpu_target.move_to(lowercase_)
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0.46 / 4
SCREAMING_SNAKE_CASE_ : str = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=lowercase_)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase_ , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase_ , buff=0.0)
cpu_targs.append(lowercase_)
first_animations.append(rect.animate(run_time=0.5).set_stroke(lowercase_))
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5))
self.play(*lowercase_)
self.play(*lowercase_)
self.wait()
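# To render the scene with the manim community CLI (the file name stage_1.py and the
# scene name Stage1 are assumptions from the restoration above):
#
#   manim -pql stage_1.py Stage1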
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for searching several keywords in one pass over a string."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append({"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        Returns a dict mapping each matched keyword to the list of its occurrences
        (0-indexed start positions).

        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
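# Example usage (mirrors the doctest above):
#
#   automaton = Automaton(["what", "hat", "ver", "er"])
#   print(automaton.search_in("whatever, err ... , wherever"))
#   # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}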
def solution(limit: int = 50000000) -> int:
    """
    Project Euler problem 87: count how many numbers below `limit` can be written as
    the sum of a prime square, a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes up to sqrt(limit - 24), the largest possible square base
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # iterate the inner loops in ascending order so the early `break` is valid
    sorted_primes = sorted(primes)

    for prime1 in sorted_primes:
        square = prime1 * prime1
        for prime2 in sorted_primes:
            cube = prime2 * prime2 * prime2
            # 16 = 2**4 is the smallest possible fourth power
            if square + cube >= limit - 16:
                break
            for prime3 in sorted_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


# NOTE: the model-specific class name was lost when this file was scrambled;
# "ImageProcessor" is a stand-in, the body follows the standard transformers layout
# (resize to 256, center-crop to 224, rescale by 1/255, ImageNet-standard normalize).
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
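# A hedged usage sketch (not part of the original file; the stand-in class name from
# the restoration above is used):
#
#   from PIL import Image
#
#   processor = ImageProcessor()
#   image = Image.new("RGB", (500, 400))
#   batch = processor.preprocess(image, return_tensors="np")
#   # batch["pixel_values"].shape == (1, 3, 224, 224): resize to 256, then center-crop to 224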
"""Convert TrOCR checkpoints from the unilm repository."""
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"  #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights to our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
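# Example invocation (the script file name and output folder are assumptions; the
# default --checkpoint_url already points at the base handwritten checkpoint):
#
#   python convert_trocr_checkpoint.py --pytorch_dump_folder_path ./trocr-base-handwritten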
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ['key_proj', 'value_proj', 'query_proj']

    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split('.')

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'{attribute} is initialized.')
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'{attribute} is initialized.')
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight'):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # sanity-check the fused projection shapes before slicing them apart
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F'{old_model} does not have {old_attribute}')
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(F'{key} was not correctly initialized!')

    print(F'Saving model to {pytorch_dump_folder_path}')
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
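# Hedged illustration (not part of the original script): the fused-projection
# split the conversion performs, shown on a tiny made-up attention layer. All
# shapes here are invented for the demo.
def _example_in_proj_split(embed_dim: int = 4):
    import torch

    in_proj_weight = torch.randn(3 * embed_dim, embed_dim)  # fused [q; k; v] rows
    q_w = in_proj_weight[:embed_dim, :]
    k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]
    v_w = in_proj_weight[2 * embed_dim :, :]
    assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)
    return nn.Parameter(q_w), nn.Parameter(k_w), nn.Parameter(v_w)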
| 208
| 0
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter object under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """Register an error to raise when the optional dependency for a format type is missing."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve a format type alias (e.g. 'np') to its canonical name (e.g. 'numpy')."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type`, raising a helpful error otherwise."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'"
        )
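# Hedged usage sketch (not part of the original module): registering an extra
# format type through the same registry. `UppercaseFormatter` is a made-up demo
# class; only `format_row` is overridden here for brevity.
class UppercaseFormatter(PythonFormatter):
    def format_row(self, pa_table):
        row = super().format_row(pa_table)
        return {k: v.upper() if isinstance(v, str) else v for k, v in row.items()}


_register_formatter(UppercaseFormatter, "uppercase", aliases=["upper"])
assert get_format_type_from_alias("upper") == "uppercase"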
| 320
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    """Pools ResNet-152 feature maps into a fixed number of image embeddings."""

    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # BxCxHxW -> Bx2048xMxN, where M*N == args.num_image_embeds
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """Loads (text, image, multi-label) examples from a jsonl file for MMBT."""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    """Return the fixed list of MM-IMDB genre labels."""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    """Standard ImageNet-style preprocessing used for the ResNet image encoder."""
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
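# Hedged usage sketch (not part of the original file): wiring the dataset and
# collate function into a DataLoader. The jsonl path and tokenizer checkpoint
# are illustrative placeholders.
def _example_build_loader(data_path="train.jsonl"):
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = JsonlDataset(data_path, tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length=128)
    return DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)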
| 320
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 73
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    """Configuration class for MVP encoder-decoder models."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
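# Hedged usage sketch (not part of the original module): `attribute_map` lets
# generic names resolve to MVP-specific ones, e.g. `hidden_size` -> `d_model`.
def _example_mvp_config() -> MvpConfig:
    cfg = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
    assert cfg.hidden_size == 512  # resolved through attribute_map
    assert cfg.num_attention_heads == cfg.encoder_attention_heads
    return cfg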
| 62
| 0
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""PyTorch XPU available""": str(_lowercase),
"""PyTorch NPU available""": str(_lowercase),
"""System RAM""": F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
}
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""")
print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()]))
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""")
    accelerate_config_str = (
        "\n".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str)

    info["Accelerate configs"] = accelerate_config
    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 266
|
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes <= num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
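# Hedged correctness sketch (not part of the original file): cross-check the
# sieve against a naive primality test on a small range.
def _naive_is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(math.sqrt(n)) + 1))


assert prime_sieve(50) == [n for n in range(2, 51) if _naive_is_prime(n)]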
| 266
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 17
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """One worker in the odd-even transposition sort; holds a single value."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` by spawning one process per element (odd-even transposition)."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
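# Hedged correctness sketch (not part of the original file): the parallel sort
# should agree with Python's built-in sorted() on a small random 10-element
# list (the worker loop above performs exactly 10 swap rounds).
if __name__ == "__main__":
    import random

    sample = random.sample(range(100), 10)
    assert odd_even_transposition(list(sample)) == sorted(sample)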
| 210
| 0
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowercase : List[str] = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = 'summarization'
    loss_names = ['loss']
    metric_names = ROUGE_KEYS
    default_val_metric = 'rouge2'

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility that dumps a readable version of one batch to disk."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
@property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs
        )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers
            )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None
            )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all.")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None])
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = 'translation'
    loss_names = ['loss']
    metric_names = ['bleu']
    default_val_metric = 'bleu'

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F'hf_{dataset}')

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
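# Hedged illustration (not part of the original script): a minimal sketch of the
# label-smoothed NLL loss that `--label_smoothing > 0` switches on above. The
# real implementation lives in `utils.label_smoothed_nll_loss`; this sketch only
# mirrors its general shape, and `ignore_index` must be a valid token id (e.g.
# the pad id), since it is gathered before masking.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
    return loss, nll_loss.sum()
    # e.g. _label_smoothed_nll_loss_sketch(torch.log_softmax(torch.randn(4, 10), -1),
    #                                      torch.tensor([1, 2, 3, 0]), 0.1, ignore_index=0)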
| 151
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"])
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple) -> Optional[Any]:
'''simple docstring'''
inspect_dataset(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : int = path + ".py"
assert script_name in os.listdir(_lowerCamelCase)
assert "__pycache__" not in os.listdir(_lowerCamelCase)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path" , ["accuracy"])
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str) -> Optional[Any]:
'''simple docstring'''
inspect_metric(_lowerCamelCase , _lowerCamelCase)
__UpperCamelCase : Dict = path + ".py"
assert script_name in os.listdir(_lowerCamelCase)
assert "__pycache__" not in os.listdir(_lowerCamelCase)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
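# Hedged usage sketch (not part of the test file): the same inspection APIs as
# they would be called outside pytest (requires network access to the Hub).
def _example_inspect(path: str = "squad"):
    configs = get_dataset_config_names(path)
    return {config: get_dataset_split_names(path, config_name=config) for config in configs}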
| 151
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
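# Hedged usage sketch (not part of the original module): calling the processor
# with dummy media. The shapes, sampling rate, and zero-config component
# instances below are illustrative assumptions only.
def _example_tvlt_processor_call():
    import numpy as np

    from transformers import TvltFeatureExtractor, TvltImageProcessor

    processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
    video = [np.random.rand(8, 3, 224, 224)]  # one clip of 8 RGB frames
    audio = [np.random.rand(10_000)]
    return processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")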
| 292
|
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """Array-backed min-heap over Node objects, with an index map for decrease_key."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
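# Hedged usage sketch (not part of the original demo): repeatedly removing the
# minimum yields the nodes in ascending order of value (a heap sort).
def _example_heap_sort():
    nodes = [Node(name, val) for name, val in [("a", 5), ("b", 2), ("c", 9), ("d", 1)]]
    heap = MinHeap(nodes)
    return [heap.remove().val for _ in range(4)]  # -> [1, 2, 5, 9]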
| 292
| 1
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/eval `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt"""
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase = model(**snake_case__ )
lowerCamelCase = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase = model(**snake_case__ )
lowerCamelCase = outputs.logits.argmax(dim=-1 )
lowerCamelCase , lowerCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
lowerCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def main():
parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument(
"--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
" between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
" and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(config, args)
if __name__ == "__main__":
main()
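# Illustrative, self-contained sketch (the toy function below is hypothetical,
# not part of the original script): `find_executable_batch_size` retries the
# wrapped callable, halving `batch_size` whenever an out-of-memory error escapes it.
from accelerate.utils import find_executable_batch_size as _toy_find_executable_batch_size

@_toy_find_executable_batch_size(starting_batch_size=128)
def _toy_train(batch_size):
    if batch_size > 16:  # pretend anything larger does not fit in GPU memory
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(_toy_train())  # 16, after retries at 128 -> 64 -> 32 -> 16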
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
"""simple docstring"""
super().__init__(
vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
pre_tok_state["add_prefix_space"] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
self.add_prefix_space = add_prefix_space
def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
"""simple docstring"""
is_split_into_words = kwargs.get("is_split_into_words", False)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs.")
return super()._batch_encode_plus(*args, **kwargs)
def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
"""simple docstring"""
is_split_into_words = kwargs.get("is_split_into_words", False)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
" pretokenized inputs.")
return super()._encode_plus(*args, **kwargs)
def save_vocabulary(self, save_directory, filename_prefix=None):
"""simple docstring"""
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
def _build_conversation_input_ids(self, conversation):
"""simple docstring"""
input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
if len(input_ids) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
return input_ids
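# Usage sketch (assumes `transformers` and torch are installed and the checkpoint
# can be downloaded from the Hub; the exact token count shown is indicative only):
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
enc = tok("Hello world", return_tensors="pt")
print(enc.input_ids.shape)           # e.g. torch.Size([1, 2])
print(tok.decode(enc.input_ids[0]))  # "Hello world"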
'''simple docstring'''
import random
def rabin_miller(num: int) -> bool:
"""simple docstring"""
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for _ in range(5):
a = random.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v**2) % num
return True
def is_prime_low_num(num: int) -> bool:
"""simple docstring"""
if num < 2:
return False
low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(num)
def generate_large_prime(keysize: int = 1_024) -> int:
"""simple docstring"""
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
if is_prime_low_num(num):
return num
if __name__ == "__main__":
num = generate_large_prime()
print("Prime number:", num)
print("is_prime_low_num:", is_prime_low_num(num))
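# Sanity checks for the helpers above: known primes pass and known composites fail.
# (rabin_miller is probabilistic, but composites divisible by a low prime are
# rejected deterministically before it is ever called.)
assert is_prime_low_num(2_003) and is_prime_low_num(7_919)   # primes
assert not is_prime_low_num(2_001) and not is_prime_low_num(7_917)  # multiples of 3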
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
tokenizer_class = ProphetNetTokenizer
test_rust_tokenizer = False
def setUp(self):
"""simple docstring"""
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
"""simple docstring"""
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer(self):
"""simple docstring"""
tokenizer = self.tokenizer_class(self.vocab_file)
tokens = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_chinese(self):
"""simple docstring"""
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
def test_basic_tokenizer_no_lower_strip_accents_false(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
def test_basic_tokenizer_no_lower_strip_accents_true(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
def test_basic_tokenizer_respects_never_split_tokens(self):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def test_wordpiece_tokenizer(self):
"""simple docstring"""
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
def test_prepare_batch(self):
"""simple docstring"""
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
batch = tokenizer(src_text, padding=True, return_tensors="pt")
self.assertIsInstance(batch, BatchEncoding)
result = list(batch.input_ids.numpy()[0])
self.assertListEqual(expected_src_tokens, result)
self.assertEqual((2, 9), batch.input_ids.shape)
self.assertEqual((2, 9), batch.attention_mask.shape)
def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def test_sequence_builders(self):
"""simple docstring"""
tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_2 + [102]
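# Usage sketch mirroring the slow test above (downloads the checkpoint from the
# Hub on first use; the trailing id 102 is the separator token appended by default):
from transformers import ProphetNetTokenizer as _ProphetNetTokenizer

_tok = _ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
_ids = _tok.encode("sequence builders")
assert _ids[-1] == 102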
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
"""simple docstring"""
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
"""simple docstring"""
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
"""simple docstring"""
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception", [
("paws", None, ValueError),
], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
"""simple docstring"""
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
"""simple docstring"""
config_names = get_dataset_config_names(path)
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config", [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
], )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
"""simple docstring"""
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
"""simple docstring"""
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception", [
("paws", None, ValueError),
], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
"""simple docstring"""
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
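# Example mirroring the parametrised cases above (requires network access):
from datasets import get_dataset_split_names as _get_dataset_split_names

print(_get_dataset_split_names("squad", config_name="plain_text"))
# ['train', 'validation']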
from ..utils import DummyObject, requires_backends
# Placeholder raised when `sentencepiece` is not installed. The original module
# declares one identical stanza per sentencepiece-backed tokenizer class; the
# class names are not recoverable here, so a single representative placeholder
# (with a hypothetical name) is kept.
class DummySentencePieceTokenizer(metaclass=DummyObject):
_backends = ["sentencepiece"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["sentencepiece"])
import re
def indian_phone_validator(phone: str) -> bool:
"""simple docstring"""
pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
if match := re.search(pat, phone):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
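# A few illustrative checks against the regex above:
assert indian_phone_validator("+91-9876543210")
assert indian_phone_validator("9876543210")
assert not indian_phone_validator("12345")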
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig(object):
"""simple docstring"""
def __init__(self, config, num_labels=None, modal_hidden_size=2048):
'''simple docstring'''
self.__dict__ = config.__dict__
self.modal_hidden_size = modal_hidden_size
if num_labels:
self.num_labels = num_labels
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
"""simple docstring"""
def __init__(self, **kwargs):
'''simple docstring'''
super().__init__(**kwargs)
requires_backends(self, "vision")
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__(self, images, **kwargs):
'''simple docstring'''
return super().__call__(images, **kwargs)
def _sanitize_parameters(self, **kwargs):
'''simple docstring'''
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
if "hypothesis_template" in kwargs:
preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
return preprocess_params, {}, {}
def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
'''simple docstring'''
image = load_image(image)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
inputs["text_inputs"] = [text_inputs]
return inputs
def _forward(self, model_inputs):
'''simple docstring'''
candidate_labels = model_inputs.pop("candidate_labels")
text_inputs = model_inputs.pop("text_inputs")
if isinstance(text_inputs[0], UserDict):
text_inputs = text_inputs[0]
else:
# Batching case.
text_inputs = text_inputs[0][0]
outputs = self.model(**text_inputs, **model_inputs)
model_outputs = {
"candidate_labels": candidate_labels,
"logits": outputs.logits_per_image,
}
return model_outputs
def postprocess(self, model_outputs):
'''simple docstring'''
candidate_labels = model_outputs.pop("candidate_labels")
logits = model_outputs["logits"][0]
if self.framework == "pt":
probs = logits.softmax(dim=-1).squeeze(-1)
scores = probs.tolist()
if not isinstance(scores, list):
scores = [scores]
elif self.framework == "tf":
probs = stable_softmax(logits, axis=-1)
scores = probs.numpy().tolist()
else:
raise ValueError(f"Unsupported framework: {self.framework}")
result = [
{"score": score, "label": candidate_label}
for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
]
return result
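# Usage sketch with a CLIP checkpoint (downloads the model and a sample image;
# for this well-known COCO picture the top label is expected to be "cat"):
from transformers import pipeline as _pipeline

_classifier = _pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
_preds = _classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "dog", "car"],
)
print(_preds[0]["label"])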
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_trocr"] = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
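# Effect of the `_LazyModule` indirection above: the import below is cheap, and
# the torch-backed submodule is only loaded on first attribute access
# (requires `transformers` to be installed).
from transformers import TrOCRConfig

config = TrOCRConfig()  # triggers the actual submodule import
print(config.model_type)  # "trocr"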
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
model_type = "gpt_neox_japanese"
def __init__( self, vocab_size=32_000, hidden_size=2_560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10_000, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31_996, eos_token_id=31_999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
"""simple docstring"""
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_multiple_size = intermediate_multiple_size
self.hidden_act = hidden_act
self.rotary_pct = rotary_pct
self.rotary_emb_base = rotary_emb_base
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.hidden_dropout = hidden_dropout
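# Minimal usage sketch: the config behaves like any PretrainedConfig subclass.
from transformers import GPTNeoXJapaneseConfig as _GPTNeoXJapaneseConfig

_cfg = _GPTNeoXJapaneseConfig(num_hidden_layers=12)
print(_cfg.model_type)   # "gpt_neox_japanese"
print(_cfg.hidden_size)  # 2560 (default)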
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
"""simple docstring"""
super().__init__()
self.convolution = nn.Conv2d(
in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
def forward(self, input: Tensor) -> Tensor:
"""simple docstring"""
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
hidden_state = self.activation(hidden_state)
return hidden_state
class ResNetEmbeddings(nn.Module):
def __init__(self, config: ResNetConfig):
"""simple docstring"""
super().__init__()
self.embedder = ResNetConvLayer(
config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.num_channels = config.num_channels
def forward(self, pixel_values: Tensor) -> Tensor:
"""simple docstring"""
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
embedding = self.embedder(pixel_values)
embedding = self.pooler(embedding)
return embedding
class ResNetShortCut(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
"""simple docstring"""
super().__init__()
self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
self.normalization = nn.BatchNorm2d(out_channels)
def forward(self, input: Tensor) -> Tensor:
"""simple docstring"""
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
return hidden_state
class ResNetBasicLayer(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
"""simple docstring"""
super().__init__()
should_apply_shortcut = in_channels != out_channels or stride != 1
self.shortcut = (
ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
)
self.layer = nn.Sequential(
ResNetConvLayer(in_channels, out_channels, stride=stride), ResNetConvLayer(out_channels, out_channels, activation=None), )
self.activation = ACT2FN[activation]
def forward(self, hidden_state):
"""simple docstring"""
residual = hidden_state
hidden_state = self.layer(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
"""simple docstring"""
super().__init__()
should_apply_shortcut = in_channels != out_channels or stride != 1
reduces_channels = out_channels // reduction
self.shortcut = (
ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
)
self.layer = nn.Sequential(
ResNetConvLayer(in_channels, reduces_channels, kernel_size=1), ResNetConvLayer(reduces_channels, reduces_channels, stride=stride), ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None), )
self.activation = ACT2FN[activation]
def forward(self, hidden_state):
"""simple docstring"""
residual = hidden_state
hidden_state = self.layer(hidden_state)
residual = self.shortcut(residual)
hidden_state += residual
hidden_state = self.activation(hidden_state)
return hidden_state
class ResNetStage(nn.Module):
def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
"""simple docstring"""
super().__init__()
layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
self.layers = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(in_channels, out_channels, stride=stride, activation=config.hidden_act), *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)], )
def forward(self, input: Tensor) -> Tensor:
"""simple docstring"""
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
class ResNetEncoder(nn.Module):
def __init__(self, config: ResNetConfig):
"""simple docstring"""
super().__init__()
self.stages = nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ))
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
hidden_states = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage_module(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_state, hidden_states=hidden_states, )
class ResNetPreTrainedModel(PreTrainedModel):
config_class = ResNetConfig
base_model_prefix = "resnet"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""simple docstring"""
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
def _set_gradient_checkpointing(self, module, value=False):
"""simple docstring"""
if isinstance(module, ResNetEncoder):
module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare ResNet model outputting raw features without any specific head on top.", RESNET_START_DOCSTRING, )
class ResNetModel(ResNetPreTrainedModel):
def __init__(self, config):
"""simple docstring"""
super().__init__(config)
self.config = config
self.embedder = ResNetEmbeddings(config)
self.encoder = ResNetEncoder(config)
self.pooler = nn.AdaptiveAvgPool2d((1, 1))
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
embedding_output = self.embedder(pixel_values)
encoder_outputs = self.encoder(
embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
'\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ', RESNET_START_DOCSTRING, )
class ResNetForImageClassification(ResNetPreTrainedModel):
def __init__(self, config):
"""simple docstring"""
super().__init__(config)
self.num_labels = config.num_labels
self.resnet = ResNetModel(config)
# classification head
self.classifier = nn.Sequential(
nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
'\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ', RESNET_START_DOCSTRING, )
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
def __init__(self, config):
"""simple docstring"""
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embedding_size] + config.hidden_sizes
self.embedder = ResNetEmbeddings(config)
self.encoder = ResNetEncoder(config)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
"""simple docstring"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None, )
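# Quick forward-pass sketch (assumes torch and Hub access for the weights); the
# resulting feature shape matches _EXPECTED_OUTPUT_SHAPE above.
import torch as _torch
from transformers import ResNetModel as _ResNetModel

_model = _ResNetModel.from_pretrained("microsoft/resnet-50")
_pixel_values = _torch.randn(1, 3, 224, 224)
with _torch.no_grad():
    _out = _model(_pixel_values)
print(_out.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])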
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_efficientformer"] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_efficientformer"] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
"""simple docstring"""
x_n = x0
x_n1 = x1
while True:
if x_n == x_n1 or function(x_n1) == function(x_n):
raise ZeroDivisionError("float division by zero, could not find root")
x_n2 = x_n1 - (
function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
)
if abs(x_n2 - x_n1) < 10**-5:
return x_n2
x_n = x_n1
x_n1 = x_n2
def f(x: float) -> float:
"""simple docstring"""
return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
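# The same secant helper works for any callable; e.g. approximating sqrt(2) as
# the positive root of g(x) = x**2 - 2 (illustrative, not part of the script):
def g(x: float) -> float:
    return x * x - 2

assert abs(intersection(g, 1.0, 2.0) - 2**0.5) < 1e-4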
def gray_code(bit_count):
'''simple docstring'''
if bit_count < 0:
raise ValueError("The given input must be positive")
# get the generated string sequence
sequence = gray_code_sequence_string(bit_count)
#
# convert them to integers
for i in range(len(sequence)):
sequence[i] = int(sequence[i], 2)
return sequence
def gray_code_sequence_string(bit_count):
'''simple docstring'''
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
seq_len = 1 << bit_count  # defines the length of the sequence
# 1 << n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
smaller_sequence = gray_code_sequence_string(bit_count - 1)
sequence = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2):
generated_no = "0" + smaller_sequence[i]
sequence.append(generated_no)
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2)):
generated_no = "1" + smaller_sequence[i]
sequence.append(generated_no)
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
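# Example: the 2-bit Gray code; consecutive values differ in exactly one bit.
print(gray_code(2))  # [0, 1, 3, 2]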
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def test_small_model_pt(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs, [
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
], )
outputs = text_generator(["This is a test", "This is a second test"])
self.assertEqual(
outputs, [
[
{
"generated_text": (
"This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
" oscope. FiliFili@@"
)
}
],
[
{
"generated_text": (
"This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
" oscope. oscope. FiliFili@@"
)
}
],
], )
outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
self.assertEqual(
outputs, [
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
], )
text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
text_generator.tokenizer.pad_token = "<pad>"
outputs = text_generator(
["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
self.assertEqual(
outputs, [
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
[
{"generated_token_ids": ANY(list)},
{"generated_token_ids": ANY(list)},
],
], )
@require_tf
def test_small_model_tf(self):
text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
# Using `do_sample=False` to force deterministic output
outputs = text_generator("This is a test", do_sample=False)
self.assertEqual(
outputs, [
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
], )
outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
self.assertEqual(
outputs, [
[
{
"generated_text": (
"This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
" please,"
)
}
],
[
{
"generated_text": (
"This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
" Cannes 閲閲Cannes Cannes Cannes 攵 please,"
)
}
],
], )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it
        # works even without a BOS token.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")

        if text_generator.framework == "tf":
            # TF generation does not support `max_new_tokens`, and it's impossible
            # to control long generation with only `max_length` without
            # fancy calculation, so these tests are dismissed for now.
            return

        # We don't care about infinite-range models; they already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # The hole strategy cannot work when the requested new tokens exceed the model's maximum length
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500,
                    handle_long_generation="hole",
                    max_new_tokens=tokenizer.model_max_length + 10,
                )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_warning_logs(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
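# The "hole" strategy exercised above trims an over-long prompt so that generation
# still fits the context window: keep only the last
# (model_max_length - max_new_tokens) tokens and leave a "hole" at the start.
# The truncation arithmetic in isolation (a sketch, not the pipeline's own code):
def hole_truncate(input_ids, model_max_length, max_new_tokens):
    keep_length = model_max_length - max_new_tokens
    if keep_length <= 0:
        raise ValueError("max_new_tokens does not leave room for any prompt tokens")
    return input_ids[-keep_length:]


assert len(hole_truncate(list(range(5000)), 1024, 20)) == 1004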
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients should be given in order of degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at the given value via direct substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
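# A quick usage sketch for the Polynomial class above: p(x) = 3x^2 + 2x + 1,
# evaluated at x = 2, plus its derivative and indefinite integral. The expected
# values follow directly from the definitions (p(2) = 17, p'(x) = 6x + 2).
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients are ordered from degree 0 upward
    assert p.evaluate(2) == 17
    print(p)               # 3x^2 + 2x + 1
    print(p.derivative())  # 6x + 2
    print(p.integral())    # 1.0x^3 + 1.0x^2 + 1.0x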
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
# fmt: off
A_ : int = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=A_, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
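# The sequence-builder assertions above pin down XLNet's convention: id 4 is
# <sep> and id 3 is <cls>, giving "A <sep> <cls>" for single sequences and
# "A <sep> B <sep> <cls>" for pairs. The same scheme in isolation, with toy ids
# independent of any real vocabulary:
def build_xlnet_style_inputs(ids_a, ids_b=None, sep=4, cls=3):
    if ids_b is None:
        return ids_a + [sep, cls]
    return ids_a + [sep] + ids_b + [sep, cls]


assert build_xlnet_style_inputs([10, 11]) == [10, 11, 4, 3]
assert build_xlnet_style_inputs([10], [20, 21]) == [10, 4, 20, 21, 4, 3]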
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
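# The _LazyModule pattern above registers submodule/symbol names up front and only
# imports them on first attribute access. A minimal standalone sketch of the same
# idea using importlib and a module-level __getattr__ (PEP 562); the mapping below
# is illustrative, not the transformers implementation:
import importlib

_lazy_symbols = {"loads": "json", "dumps": "json"}


def __getattr__(name):
    if name in _lazy_symbols:
        return getattr(importlib.import_module(_lazy_symbols[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")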
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
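# The try/except blocks above act as an optional-dependency gate: when a backend
# is missing, its symbols simply never enter _import_structure. The availability
# check can be sketched with importlib alone (a stand-in for is_torch_available
# and friends, not the real helpers):
import importlib.util


def _is_package_available(name: str) -> bool:
    # find_spec locates the package without importing it
    return importlib.util.find_spec(name) is not None


assert _is_package_available("json")  # stdlib module, always present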
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an EfficientFormer model."""

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
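# A quick sketch of how a config like the one above is typically used: construct
# it with keyword overrides, then round-trip through a dict. This assumes
# `transformers` is installed; attribute names mirror the __init__ above.
if __name__ == "__main__":
    config = EfficientFormerConfig(hidden_act="relu", num_channels=3)
    assert config.hidden_act == "relu"
    d = config.to_dict()  # PretrainedConfig serialization
    assert d["model_type"] == "efficientformer"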
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
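# The shape assertions above rest on RegNet halving the spatial resolution at the
# stem and after every stage: hidden state k has side image_size // 2**(k+1).
# The arithmetic in isolation (a sketch mirroring the test's expectations, not
# part of the test suite):
def expected_feature_side(image_size: int, stage_index: int) -> int:
    return image_size // (2 ** (stage_index + 1))


assert expected_feature_side(32, 0) == 16  # first hidden state: image_size // 2
assert expected_feature_side(32, 4) == 1   # last hidden state:  image_size // 32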
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    """Creates a Linked List from the elements of the given sequence and returns its head."""
    # if elements_list is empty
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as Head
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    """Prints the elements of the given Linked List in reverse order."""
    # If we have not reached the end of the List
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
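# The recursive print_reverse above makes one call per node, so very long lists
# can hit Python's recursion limit. An iterative alternative with the same
# output, sketched against the Node class above:
def print_reverse_iterative(head_node):
    items = []
    while head_node:
        items.append(head_node.data)
        head_node = head_node.next
    for item in reversed(items):
        print(item)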
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
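# Note the TYPE_CHECKING split above: static type checkers take the
# `if TYPE_CHECKING:` branch and see the real imports, while at runtime the
# module body defers to the lazy proxy instead. The same two-phase trick in
# miniature:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal  # seen by type checkers only


def parse_amount(text: str) -> "Decimal":
    from decimal import Decimal  # imported lazily, on first call

    return Decimal(text)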
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def main() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
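# A short usage sketch for the doubly linked list above: inserts at the tail and
# by 1-indexed position, then a deletion; __str__ joins the node data with spaces.
if __name__ == "__main__":
    ll = LinkedList()
    for value in (1, 2, 3):
        ll.insert(value)         # appends at the tail
    ll.insert_at_position(2, 9)  # list becomes 1 9 2 3
    assert str(ll) == "1 9 2 3"
    ll.delete_value(9)
    assert str(ll) == "1 2 3"
    assert 2 in ll and 9 not in ll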
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # mask padding tokens so the loss ignores them (see the note below)
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
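# The -100 values written into "labels" above are what keep padding out of the
# loss: torch.nn.CrossEntropyLoss ignores targets equal to ignore_index, which
# defaults to -100. A tiny demonstration (assumes torch is installed):
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 4, 10)               # (batch, seq_len, vocab)
labels = torch.tensor([[3, 7, -100, -100]])  # last two positions are padding
loss = CrossEntropyLoss()(logits.view(-1, 10), labels.view(-1))
assert torch.isfinite(loss)  # only the two real positions contribute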
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    This class stores the first and second signal and performs their circular convolution.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Performs the circular convolution of the first and second signal using the matrix method.
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is a cyclic shift of the second signal
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
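# Circular convolution is pointwise multiplication in the frequency domain, so
# the matrix method above can be cross-checked with the FFT. A sketch of that
# check (numpy is already imported above as np; repeated here so the snippet is
# self-contained):
import numpy as np


def circular_convolution_fft(first, second):
    n = max(len(first), len(second))
    spectrum = np.fft.fft(first, n) * np.fft.fft(second, n)
    return [round(value, 2) for value in np.real(np.fft.ifft(spectrum))]


if __name__ == "__main__":
    assert CircularConvolution().circular_convolution() == circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4])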
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by squared Euclidean distance (two clusters)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move every component of the winning vector towards the sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
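# The winner search above is the core step of competitive learning: pick the
# weight vector closest (in squared Euclidean distance) to the sample. The same
# step vectorized with NumPy, generalized to any number of clusters (a sketch;
# assumes numpy is available):
import numpy as np


def get_winner_vectorized(weights, sample):
    distances = ((np.asarray(weights) - np.asarray(sample)) ** 2).sum(axis=1)
    return int(np.argmin(distances))


assert get_winner_vectorized([[0.0, 0.0], [1.0, 1.0]], [0.9, 1.1]) == 1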
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
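# The translator above maps " " -> "\u2582" and "\n" -> "\u2583" so that spaces
# and newlines survive SentencePiece processing, and _decode maps them back.
# The round trip in isolation:
_translator = str.maketrans(" \n", "\u2582\u2583")


def protect(text: str) -> str:
    return text.translate(_translator)


def restore(text: str) -> str:
    return text.replace("\u2582", " ").replace("\u2583", "\n")


assert restore(protect("你好 世界\nhello")) == "你好 世界\nhello"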
import math
def res(x, y):
    """Return y * log10(x) as a sortable proxy for the size of x ** y."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
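# Why compare y * log10(x) instead of x ** y: the logarithms stay small floats
# even when the powers themselves would be astronomically large. A quick check
# on values small enough to verify directly:
assert (res(3, 5) > res(5, 3)) == (3**5 > 5**3)      # 243 > 125
assert (res(2, 10) > res(10, 2)) == (2**10 > 10**2)  # 1024 > 100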
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the jieba segmentation tool. It is used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        # Undo the jieba-specific space/newline encoding applied during tokenization
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
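# Minimal usage sketch for the tokenizer above (hedged: assumes the
# "TsinghuaAI/CPM-Generate" checkpoint from PRETRAINED_VOCAB_FILES_MAP is
# reachable; the jieba-based pre-tokenization that uses self.translator lives
# on the full class and is not part of this excerpt).
tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
ids = tokenizer.encode("今天天气很好")
text = tokenizer.decode(ids)  # _decode() maps \u2582/\u2583 back to space/newline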
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Loads the four v1.x Stable Diffusion checkpoints so the same prompt can be compared across them."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    # One thin wrapper per checkpoint; generation kwargs (height, width,
    # num_inference_steps, guidance_scale, ...) are forwarded unchanged.
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe1(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe2(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe3(prompt=prompt, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], **kwargs):
        return self.pipe4(prompt=prompt, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 (required by the VAE)
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get one result each from Stable Diffusion checkpoints v1.1 through v1.4
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, **kwargs)
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, **kwargs)
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, **kwargs)
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, **kwargs)
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
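# Minimal usage sketch (hedged: instantiating the comparison pipeline directly
# from the v1.4 checkpoint is one plausible entry point; the exact loading
# mechanics for this community-style pipeline are an assumption):
# pipe = StableDiffusionComparisonPipeline.from_pretrained(pipe4_model_id)
# output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=25)
# images = output.images  # one image per checkpoint, v1.1 through v1.4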
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
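# Worked example of the subtoken/label alignment above (hedged: the subtokens
# shown are illustrative; actual pieces depend on the checkpoint's vocab).
# With pad_token_label_id = -100 and label_map = {"O": 0, "B-PER": 1}:
#   words:      ["Obama",        "spoke"]
#   labels:     ["B-PER",        "O"]
#   subtokens:  ["Ob", "##ama",  "spoke"]
#   label_ids:  [ 1,    -100,     0 ]
# Only the first subtoken of each word keeps the real label; -100 makes the
# cross-entropy loss ignore the continuation pieces.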
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """Compares two TensorProtos for equality while ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Removes duplicate initializers from an ONNX model and saves an optimized copy next to it."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
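# Usage sketch (hedged: assumes an ONNX file whose graph contains duplicated
# weight initializers, e.g. tied embeddings exported twice):
# optimized_path = remove_dup_initializers("model/encoder.onnx")
# The deduplicated copy is written next to the input as "optimized_encoder.onnx",
# and every node input that referenced a duplicate is rewired to the surviving tensor.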
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa

set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        eval_steps: int,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        save_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(save_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
        return output_dir
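# Hedged sketch of the distributed command the helper above assembles (the
# GPU count, port, and paths are illustrative, not taken from the file):
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       examples/pytorch/translation/run_translation.py \
#       --model_name_or_path sshleifer/tiny-mbart --do_train ...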
import argparse
import datetime


def zeller(date_input: str) -> str:
    """
    Find out what day of the week nearly any date is or was.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
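# Worked example of the congruence above for 01-31-2010 (m=1 -> 13, y -> 2009):
#   c = 20, k = 9, t = int(2.6*13 - 5.39) = 28, u = 5, v = 2, x = 31 + 9 = 40
#   z = 28 + 5 + 2 + 40 = 75, w = 75 - (2*20) = 35, f = 35 % 7 = 0 -> days["0"] = "Sunday"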
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the new default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; meant for internal library use."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level for the library's root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the library's root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a handler to the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove a given handler from the library's root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Enable explicit formatting for every transformers logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset the formatting for every transformers logger."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    Identical to `logger.warning()`, except that the warning is suppressed when the
    TRANSFORMERS_NO_ADVISORY_WARNINGS env var is set.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique call arguments (via the lru_cache)."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
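# Minimal usage sketch of the module above (hedged: assumes it is importable as
# transformers.utils.logging, which is where this file lives upstream):
# from transformers.utils import logging
# logging.set_verbosity_info()
# logger = logging.get_logger(__name__)
# logger.info("visible at INFO level")
# logger.warning_advice("silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")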
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given list and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    # Set the first element as head; `current` walks the list as nodes are appended
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the linked list in reverse order by recursing to the tail first."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
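# A non-recursive alternative sketch (not part of the file above); useful when
# the list is long enough to risk hitting Python's recursion limit.
def print_reverse_iterative(head_node):
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())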
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = """
from transformers import pipeline
"""
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = """
from transformers import AutoModel
"""
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
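# The pattern exercised above, outside of tests (standard env-var usage):
#   TRANSFORMERS_OFFLINE=1 python my_app.py
# forces transformers to resolve every model/config/tokenizer from the local
# cache and fail fast instead of attempting any network call.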
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
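# Minimal usage sketch (hedged: the checkpoint name and the 16 kHz waveform
# array are illustrative assumptions, not taken from this file):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# batch = processor(audio=waveform, sampling_rate=16_000, text="reference transcript")
# `batch` now holds the extractor's input features plus a "labels" field
# containing the tokenized transcript.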
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily find the minimum change for `value` from the given denominations."""
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"
    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()
    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 173
| 0
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """simple docstring"""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ))
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """simple docstring"""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
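# Illustrative call (assumed values): _distribute_shards(num_shards=5, max_num_jobs=2)
# yields [range(0, 3), range(3, 5)] -- earlier groups absorb the remainder shards.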
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """simple docstring"""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """simple docstring"""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """simple docstring"""
    # Identify every distinct list length present in gen_kwargs
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 51
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 51
| 1
|
def solution(limit=28123) -> int:
    """simple docstring"""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
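# The sieve above accumulates proper-divisor sums: e.g. sum_divs[12] ends up
# 1 + 2 + 3 + 4 + 6 = 16 > 12, making 12 the smallest abundant number found.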
if __name__ == "__main__":
print(solution())
| 351
|
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 273
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Tuple = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'open-llama'

    def __init__(
        self,
        vocab_size=10_00_00,
        hidden_size=40_96,
        intermediate_size=1_10_08,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=20_48,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention)
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
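# Illustrative check (assumed values): OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
# passes validation, while {"type": "ntk", "factor": 2.0} raises a ValueError.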
| 63
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
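# Illustrative examples: is_isogram("Uncopyrightable") -> True (no repeated letter);
# is_isogram("allowance") -> False because of the repeated "a" and "l".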
if __name__ == "__main__":
_UpperCamelCase = input("Enter a string ").strip()
_UpperCamelCase = is_isogram(input_str)
print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 275
| 0
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
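# Illustrative usage: p = Polynomial(2, [1, 0, 3]) represents 3x^2 + 1;
# p.evaluate(2) == 13, and p.derivative() represents 6x.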
| 262
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowercase ( _UpperCAmelCase = "isbn/0140328726" ) -> dict:
lowerCamelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCamelCase =F"""{olid} is not a valid Open Library olid"""
raise ValueError(_UpperCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 262
| 1
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
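# The sequence grows doubly exponentially: sylvester(1..4) -> 2, 3, 7, 43,
# since each term equals the previous term squared minus it, plus one.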
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 195
|
def mf_knapsack(i: int, wt: list, val: list, j: int):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val_
    return f[i][j]
def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
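# With wt = [4, 3, 2, 3], val = [3, 2, 4, 4] and w = 6 (the driver values below),
# the table yields dp[4][6] == 8, achieved by items 3 and 4 (weights 2 + 3 <= 6).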
def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
| 195
| 1
|
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
| 360
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : Union[str, Any] = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = """luke"""

    def __init__(
        self,
        vocab_size=5_0_2_6_7,
        entity_vocab_size=5_0_0_0_0_0,
        hidden_size=7_6_8,
        entity_emb_size=2_5_6,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 170
| 0
|
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js'
def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion ="""):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {"""):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}"""):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f"    \"v{version}\": \"v{version}\",\n"
    with open(CUSTOM_JS_FILE, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)
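# E.g. (assumed invocation) running with --version 4.31.0 rewrites the stable line to
#     const stableVersion = "v4.31.0"
# and appends '"v4.31.0": "v4.31.0",' as the last entry of the versionMapping table.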
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
update_custom_js(args.version)
| 44
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self) -> Any:
        """simple docstring"""
        tokenizer = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_a = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
| 3
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.0_2,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        '''simple docstring'''
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )

    def create_and_check_model(self, config, input_values, labels):
        '''simple docstring'''
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        '''simple docstring'''
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='AST does not use inputs_embeds')
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass

    def test_model_common_attributes(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['input_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio() -> Any:
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset')
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        '''simple docstring'''
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        '''simple docstring'''
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593').to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 359
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 263
| 0
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    # Compare with the names blanked out, then restore them
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
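# Protobuf messages compare field-by-field, so blanking `name` lets two
# initializers with identical tensor data but different names compare equal.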
def _node_replace_input_with(node_proto, name, new_name):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ', dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print('total reduced size: ', total_reduced_size / 1024 / 1024 / 1024, 'GB')
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
| 285
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = '''tokenizer_file'''
    special_tokens_map = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        TARGET_TOKENS = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)['input_ids']
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli', 'all_languages', split='test', streaming=True)
        sample_data = next(iter(ds))['premise']  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 285
| 1
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )
default_cache_path = os.path.join(torch_cache_home, """transformers""")
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""


def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """simple docstring"""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    """simple docstring"""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0) -> str:
        '''simple docstring'''
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self) -> str:
        '''simple docstring'''
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val) -> List[Any]:
        '''simple docstring'''
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def dict(self) -> Dict:
        '''simple docstring'''
        return self._pointer

    def dump_yaml(self, data, file_name) -> Optional[Any]:
        '''simple docstring'''
        with open(F'''{file_name}''', "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name) -> str:
        '''simple docstring'''
        with open(F'''{file_name}''', "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config) -> Optional[Any]:
        '''simple docstring'''
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self) -> int:
        '''simple docstring'''
        t = "    "
        if self._name != "root":
            r = F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(v).__name__})\n'''
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> Union[str, Any]:
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs) -> Union[str, Any]:
        '''simple docstring'''
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """simple docstring"""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.0_1, atol=0.1), (
        f'''{sum([1 for x in np.isclose(n1, n2, rtol=0.0_1, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below
def is_remote_url(url_or_filename):
    """simple docstring"""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True):
    """simple docstring"""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''


def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None, ):
    """simple docstring"""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path


def url_to_filename(url, etag=None):
    """simple docstring"""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
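# Cache-key sketch: url_to_filename hashes the URL with SHA-256 and, when an
# ETag is available, appends its hash after a dot, so a changed remote file
# produces a new cache entry instead of silently reusing a stale one.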
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    """simple docstring"""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]="," ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if os.path.isfile(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE ) as f:
a__ : Any =eval(f.read() )
else:
a__ : Optional[int] =requests.get(SCREAMING_SNAKE_CASE )
try:
a__ : Optional[Any] =req.json()
except Exception:
a__ : Union[str, Any] =req.content.decode()
assert data is not None, "could not connect"
try:
a__ : Optional[Any] =eval(SCREAMING_SNAKE_CASE )
except Exception:
a__ : Any =data.split("\n" )
req.close()
return data
def _A ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
a__ : List[str] =requests.get(SCREAMING_SNAKE_CASE )
a__ : Optional[Any] =np.array(Image.open(BytesIO(response.content ) ) )
return img
def _A ( SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
a__ : Tuple =url.split("/" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , "rb" ) as stream:
a__ : Any =pkl.load(SCREAMING_SNAKE_CASE )
a__ : Dict =weights.pop("model" )
a__ : Any ={}
for k, v in model.items():
a__ : Dict =torch.from_numpy(SCREAMING_SNAKE_CASE )
if "running_var" in k:
a__ : str =torch.tensor([0] )
a__ : Any =k.replace("running_var" , "num_batches_tracked" )
a__ : Optional[int] =zero
return new
def _A ( ):
"""simple docstring"""
print(f'''{os.path.abspath(os.path.join(__file__ , os.pardir ) )}/demo.ipynb''' )
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple="RGB" ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if os.path.isfile(SCREAMING_SNAKE_CASE ):
a__ : Union[str, Any] =cva.imread(SCREAMING_SNAKE_CASE )
else:
a__ : Optional[Any] =get_image_from_url(SCREAMING_SNAKE_CASE )
assert img is not None, f'''could not connect to: {im}'''
a__ : str =cva.cvtColor(SCREAMING_SNAKE_CASE , cva.COLOR_BGR2RGB )
if input_format == "RGB":
a__ : List[Any] =img[:, :, ::-1]
return img
def _A ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int=1 ):
"""simple docstring"""
return (images[i : i + batch] for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ))
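# Illustrative sketch (not part of the original file): a readable version of the
# batching generator above, under a hypothetical name.
def chunk(images, batch=1):
    # Yield consecutive slices of length `batch`; the last slice may be shorter.
    return (images[i : i + batch] for i in range(0, len(images), batch))

# Example: list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]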
| 148
|
def _A ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.2_5) = }""")
print(F"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
| 148
| 1
|
"""simple docstring"""
import numpy
# List of input, output pairs
_A : int = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_A : Any = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
_A : Tuple = [2, 4, 1, 5]
_A : Union[str, Any] = len(train_data)
_A : Any = 0.0_0_9
def __magic_name__ ( __snake_case : List[Any] , __snake_case : Any="train" ) -> List[Any]:
return calculate_hypothesis_value(__snake_case , __snake_case ) - output(
__snake_case , __snake_case )
def __magic_name__ ( __snake_case : List[str] ) -> Tuple:
lowercase : Dict = 0
for i in range(len(__snake_case ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __magic_name__ ( __snake_case : List[Any] , __snake_case : Optional[int] ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __magic_name__ ( __snake_case : int , __snake_case : Union[str, Any] ) -> Union[str, Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def __magic_name__ ( __snake_case : List[Any] , __snake_case : Any=m ) -> str:
lowercase : Union[str, Any] = 0
for i in range(__snake_case ):
if index == -1:
summation_value += _error(i )
else:
summation_value += _error(i ) * train_data[i][0][index]
return summation_value
def __magic_name__ ( __snake_case : List[Any] ) -> Tuple:
lowercase : Optional[int] = summation_of_cost_derivative(__snake_case , __snake_case ) / m
return cost_derivative_value
def __magic_name__ ( ) -> Dict:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowercase : Union[str, Any] = 0.00_00_02
lowercase : Optional[int] = 0
lowercase : Tuple = 0
while True:
j += 1
lowercase : Union[str, Any] = [0, 0, 0, 0]
for i in range(0 , len(__snake_case ) ):
lowercase : Optional[int] = get_cost_derivative(i - 1 )
lowercase : Any = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__snake_case , __snake_case , atol=__snake_case , rtol=__snake_case , ):
break
lowercase : Optional[Any] = temp_parameter_vector
print(("Number of iterations:", j) )
def __magic_name__ ( ) -> int:
for i in range(len(__snake_case ) ):
print(("Actual output value:", output(__snake_case , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(__snake_case , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
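# Illustrative note (not part of the original file): each pass above performs the
# batch gradient-descent update
#     parameter_vector[i] <- parameter_vector[i] - LEARNING_RATE * (1 / m) * sum_j error(j) * x_i(j)
# (with x_0(j) = 1 for the bias term), stopping once numpy.allclose judges two
# successive parameter vectors equal within the chosen atol/rtol tolerance.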
| 202
|
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : List[str]=7 ) -> str:
lowercase : int = None
if token is not None:
lowercase : Any = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase : int = "636036"
lowercase : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase : int = requests.get(__snake_case , headers=__snake_case ).json()
return result["workflow_runs"]
def __magic_name__ ( __snake_case : Dict ) -> Tuple:
lowercase : Tuple = get_daily_ci_runs(__snake_case )
lowercase : Union[str, Any] = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase : List[Any] = workflow_run["id"]
break
return workflow_run_id
def __magic_name__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> int:
lowercase : Dict = get_last_daily_ci_runs(__snake_case )
if workflow_run_id is not None:
lowercase : Dict = get_artifacts_links(worflow_run_id=__snake_case , token=__snake_case )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase : Optional[int] = artifacts_links[artifact_name]
download_artifact(
artifact_name=__snake_case , artifact_url=__snake_case , output_dir=__snake_case , token=__snake_case )
def __magic_name__ ( __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple ) -> Optional[int]:
get_last_daily_ci_artifacts(__snake_case , __snake_case , __snake_case )
lowercase : str = {}
for artifact_name in artifact_names:
lowercase : Optional[Any] = os.path.join(__snake_case , f"""{artifact_name}.zip""" )
if os.path.isfile(__snake_case ):
lowercase : List[Any] = {}
with zipfile.ZipFile(__snake_case ) as z:
for filename in z.namelist():
if not os.path.isdir(__snake_case ):
# read the file
with z.open(__snake_case ) as f:
lowercase : str = f.read().decode("UTF-8" )
return results
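# Illustrative note (not part of the original file): the flow above is
#   1. list the scheduled daily CI runs on `main` via the GitHub Actions API,
#   2. take the id of the most recent completed run,
#   3. download each requested artifact zip for that run, and
#   4. read every member of each zip, returning {artifact_name: {filename: text}}.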
| 202
| 1
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase =logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Any:
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCamelCase__ ( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=None ) -> int:
A = {}
A = {}
if prompt is not None:
A = prompt
if generate_kwargs is not None:
A = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
A = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
A = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Any:
return super().__call__(lowerCamelCase_ ,**lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=None ) -> Optional[Any]:
A = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
raise ValueError(
f'Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '
"""Note also that one single text can be provided for conditional image to text generation.""" )
A = self.model.config.model_type
if model_type == "git":
A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework )
A = self.tokenizer(text=lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ).input_ids
A = [self.tokenizer.cls_token_id] + input_ids
A = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
A = self.image_processor(images=lowerCamelCase_ ,header_text=lowerCamelCase_ ,return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework )
A = self.tokenizer(lowerCamelCase_ ,return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(f'Model type {model_type} does not support conditional text generation' )
else:
A = self.image_processor(images=lowerCamelCase_ ,return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
A = None
return model_inputs
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=None ) -> Optional[int]:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
# pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking for it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] ,lowerCamelCase_ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
A = None
if generate_kwargs is None:
A = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
A = model_inputs.pop(self.model.main_input_name )
A = self.model.generate(lowerCamelCase_ ,**lowerCamelCase_ ,**lowerCamelCase_ )
return model_outputs
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Optional[Any]:
A = []
for output_ids in model_outputs:
A = {
"""generated_text""": self.tokenizer.decode(
lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,)
}
records.append(lowerCamelCase_ )
return records
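# Illustrative usage sketch (not part of the original file); the checkpoint name
# is an assumption, but this class backs the "image-to-text" task of the standard
# `transformers.pipeline` factory:
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("photo.png")  # -> [{"generated_text": "..."}]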
| 77
|
"""simple docstring"""
def _A ( ):
"""simple docstring"""
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
UpperCAmelCase =generate_large_matrix()
UpperCAmelCase =(
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def _A ( _a : list[list[int]] ):
"""simple docstring"""
assert all(row == sorted(row , reverse=True ) for row in grid )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def _A ( _a : list[int] ):
"""simple docstring"""
A = 0
A = len(_a ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
A = (left + right) // 2
A = array[mid]
# Num must be negative and the element before it non-negative, so mid is the first negative index.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
A = mid + 1
else:
A = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_a )
def _A ( _a : list[list[int]] ):
"""simple docstring"""
A = 0
A = len(grid[0] )
for i in range(len(_a ) ):
A = find_negative_index(grid[i][:bound] )
total += bound
return (len(_a ) * len(grid[0] )) - total
def _A ( _a : list[list[int]] ):
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def _A ( _a : list[list[int]] ):
"""simple docstring"""
A = 0
for row in grid:
for i, number in enumerate(row ):
if number < 0:
total += len(row ) - i
break
return total
def _A ( ):
"""simple docstring"""
from timeit import timeit
print("""Running benchmarks""" )
A = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
A = timeit(f'{func}(grid=grid)' , setup=_a , number=5_0_0 )
print(f'{func}() took {time:0.4f} seconds' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
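# Illustrative examples (not part of the original file), using the test grids
# defined above: [[3, 2], [1, 0]] contains no negatives, so every counting
# strategy returns 0, while [[7, 7, 6], [-1, -2, -3]] has a fully negative
# bottom row and all three functions return 3.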
| 77
| 1
|
import argparse
import datetime
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
_lowercase ={
'''0''': '''Sunday''',
'''1''': '''Monday''',
'''2''': '''Tuesday''',
'''3''': '''Wednesday''',
'''4''': '''Thursday''',
'''5''': '''Friday''',
'''6''': '''Saturday''',
}
_lowercase ={0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(__snake_case ) < 11:
raise ValueError('''Must be 10 characters long''' )
# Get month
_lowercase =int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('''Month must be between 1 - 12''' )
_lowercase =date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get day
_lowercase =int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('''Date must be between 1 - 31''' )
# Get second separator
_lowercase =date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('''Date separator must be \'-\' or \'/\'''' )
# Get year
_lowercase =int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
'''Year out of range. There has to be some sort of limit...right?''' )
# Get datetime obj for validation
_lowercase =datetime.date(int(__snake_case ) , int(__snake_case ) , int(__snake_case ) )
# Start math
if m <= 2:
_lowercase =y - 1
_lowercase =m + 12
# maths var
_lowercase =int(str(__snake_case )[:2] )
_lowercase =int(str(__snake_case )[2:] )
_lowercase =int(2.6 * m - 5.39 )
_lowercase =int(c / 4 )
_lowercase =int(k / 4 )
_lowercase =int(d + k )
_lowercase =int(t + u + v + x )
_lowercase =int(z - (2 * c) )
_lowercase =round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('''The date was evaluated incorrectly. Contact developer.''' )
# Response
_lowercase =F"Your date {date_input}, is a {days[str(__snake_case )]}!"
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = argparse.ArgumentParser(
description=(
'''Find out what day of the week nearly any date is or was. Enter '''
'''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
)
)
parser.add_argument(
'''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
)
UpperCAmelCase__ = parser.parse_args()
zeller(args.date_input)
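# Illustrative usage sketch (not part of the original file); the script name is
# hypothetical:
#     python this_script.py "01-31-2010"
#     # -> "Your date 01-31-2010, is a Sunday!"  (31 Jan 2010 fell on a Sunday)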
| 5
|
UpperCAmelCase__ = 8.31_44_62 # Unit - J mol-1 K-1
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
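# Illustrative check (not part of the original file): one mole of an ideal gas
# at 300 K in a 1 m^3 vessel gives, straight from PV = nRT,
#     pressure = 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa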
| 5
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class __snake_case (_lowerCamelCase ):
lowerCAmelCase__ = "levit"
def __init__( self : str , _UpperCAmelCase : int=224 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : str=[128, 256, 384] , _UpperCAmelCase : Dict=[4, 8, 12] , _UpperCAmelCase : str=[4, 4, 4] , _UpperCAmelCase : Dict=[16, 16, 16] , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : List[Any]=[2, 2, 2] , _UpperCAmelCase : Any=[2, 2, 2] , _UpperCAmelCase : str=0.02 , **_UpperCAmelCase : Tuple , ) -> str:
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : int = kernel_size
_lowerCAmelCase : str = stride
_lowerCAmelCase : Union[str, Any] = padding
_lowerCAmelCase : Union[str, Any] = hidden_sizes
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : Union[str, Any] = depths
_lowerCAmelCase : Optional[int] = key_dim
_lowerCAmelCase : Optional[int] = drop_path_rate
_lowerCAmelCase : Tuple = patch_size
_lowerCAmelCase : List[Any] = attention_ratio
_lowerCAmelCase : str = mlp_ratio
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : str = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class __snake_case (_lowerCamelCase ):
lowerCAmelCase__ = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return 1E-4
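# Illustrative usage sketch (not part of the original file), assuming the
# standard transformers API for this configuration class:
#     from transformers import LevitConfig, LevitModel
#     config = LevitConfig()      # levit-128S-style defaults as above
#     model = LevitModel(config)  # randomly initialised LeViT backbone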
| 353
|
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : str = len(UpperCamelCase_ ) + 1
_lowerCAmelCase : List[Any] = len(UpperCamelCase_ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_lowerCAmelCase : List[Any] = [[0 for i in range(UpperCamelCase_ )] for j in range(UpperCamelCase_ )]
# since string of zero length match pattern of zero length
_lowerCAmelCase : Optional[int] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , UpperCamelCase_ ):
_lowerCAmelCase : Optional[Any] = 0
# a zero-length string can still match a pattern if every other character
# is '*' (each "x*" pair may match zero occurrences)
for j in range(1 , UpperCamelCase_ ):
_lowerCAmelCase : Tuple = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , UpperCamelCase_ ):
for j in range(1 , UpperCamelCase_ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_lowerCAmelCase : Dict = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_lowerCAmelCase : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_lowerCAmelCase : int = dp[i - 1][j]
else:
_lowerCAmelCase : List[str] = 0
else:
_lowerCAmelCase : List[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_lowerCamelCase : Any = "aab"
_lowerCamelCase : List[str] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
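# Illustrative examples (not part of the original file): the DP above yields
# match_pattern("aab", "c*a*b") -> True, since "c*" can match zero 'c's and
# "a*" consumes "aa", while match_pattern("ab", ".c") -> False, because '.'
# matches 'a' but 'c' cannot match 'b'.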
| 159
| 0
|
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ : str ) -> list[int]:
'''simple docstring'''
return [ord(elem ) - 9_6 for elem in plain]
def _snake_case ( lowercase__ : list[int] ) -> str:
'''simple docstring'''
return "".join(chr(elem + 9_6 ) for elem in encoded )
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :str = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , lowercase__ )
print("""Decoded:""" , decode(lowercase__ ) )
if __name__ == "__main__":
main()
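# Illustrative example (not part of the original file): encode("abc") returns
# [1, 2, 3] because ord("a") - 96 == 1, and decode([1, 2, 3]) maps back to "abc".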
| 84
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
UpperCAmelCase_ :List[Any] = "ssube/stable-diffusion-x4-upscaler-onnx"
def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]:
lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) )
lowerCAmelCase_ :List[Any] = torch.manual_seed(__A )
lowerCAmelCase_ :Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Dict = pipe(**__A ).images
lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :int = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :int = self.get_dummy_inputs()
lowerCAmelCase_ :List[str] = pipe(**__A ).images
lowerCAmelCase_ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :str = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images
lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Tuple = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images
lowerCAmelCase_ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Tuple = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Dict = pipe(**__A ).images
lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Dict = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[int] = ort.SessionOptions()
lowerCAmelCase_ :Dict = False
return options
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :List[Any] = torch.manual_seed(0 )
lowerCAmelCase_ :str = pipe(
prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Dict = output.images
lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase_ :List[str] = init_image.resize((128, 128) )
lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = pipe(
prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :int = output.images
lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Union[str, Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 84
| 1
|
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
a : Tuple = 6_378_137.0
a : int = 6_356_752.314_245
a : Dict = 637_8137
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> float:
'''simple docstring'''
snake_case_ = (AXIS_A - AXIS_B) / AXIS_A
snake_case_ = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
snake_case_ = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) )
snake_case_ = radians(__UpperCAmelCase )
snake_case_ = radians(__UpperCAmelCase )
# Equation
snake_case_ = sin((phi_a - phi_a) / 2 )
snake_case_ = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
snake_case_ = sqrt(sin_sq_phi + (cos(__UpperCAmelCase ) * cos(__UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(__UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
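# Illustrative example (not part of the original file): for San Francisco
# (37.774856, -122.424227) and Yosemite Valley (37.864742, -119.537521) the
# haversine distance works out to roughly 254 km along the great circle.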
| 72
|
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
_enforce_args(__UpperCAmelCase, __UpperCAmelCase )
if n == 0:
return 0
snake_case_ = float('''-inf''' )
for i in range(1, n + 1 ):
snake_case_ = max(
__UpperCAmelCase, prices[i - 1] + naive_cut_rod_recursive(n - i, __UpperCAmelCase ) )
return max_revue
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
_enforce_args(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
snake_case_ = float('''-inf''' )
for i in range(1, n + 1 ):
snake_case_ = max(
__UpperCAmelCase, prices[i - 1] + _top_down_cut_rod_recursive(n - i, __UpperCAmelCase, __UpperCAmelCase ), )
snake_case_ = max_revenue
return max_rev[n]
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
_enforce_args(__UpperCAmelCase, __UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
snake_case_ = [float('''-inf''' ) for _ in range(n + 1 )]
snake_case_ = 0
for i in range(1, n + 1 ):
snake_case_ = max_rev[i]
for j in range(1, i + 1 ):
snake_case_ = max(__UpperCAmelCase, prices[j - 1] + max_rev[i - j] )
snake_case_ = max_revenue_i
return max_rev[n]
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
if n < 0:
snake_case_ = F"n must be greater than or equal to 0. Got n = {n}"
raise ValueError(__UpperCAmelCase )
if n > len(__UpperCAmelCase ):
snake_case_ = (
'''Each integral piece of rod must have a corresponding price. '''
F"Got n = {n} but length of prices = {len(__UpperCAmelCase )}"
)
raise ValueError(__UpperCAmelCase )
def __magic_name__ ( ) -> Optional[int]:
'''simple docstring'''
snake_case_ = [6, 10, 12, 15, 20, 23]
snake_case_ = len(__UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
snake_case_ = 36
snake_case_ = top_down_cut_rod(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = bottom_up_cut_rod(__UpperCAmelCase, __UpperCAmelCase )
snake_case_ = naive_cut_rod_recursive(__UpperCAmelCase, __UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
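# Illustrative example (not part of the original file): with prices = [1, 5, 8, 9]
# and n = 4, cutting into two pieces of length 2 earns 5 + 5 = 10, the maximum
# revenue all three implementations agree on.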
| 72
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
"""simple docstring"""
A : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
A : int = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : List[str] = parent
A : str = batch_size
A : Optional[Any] = seq_length
A : List[str] = is_training
A : List[Any] = use_input_mask
A : Optional[Any] = use_token_type_ids
A : Optional[Any] = use_labels
A : List[str] = vocab_size
A : Dict = hidden_size
A : Union[str, Any] = num_hidden_layers
A : Tuple = num_attention_heads
A : Dict = intermediate_size
A : Tuple = hidden_act
A : List[Any] = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : int = max_position_embeddings
A : int = type_vocab_size
A : str = type_sequence_label_size
A : int = initializer_range
A : Optional[Any] = num_labels
A : Optional[int] = num_choices
A : Tuple = scope
A : Dict = embedding_size
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Union[str, Any] = None
if self.use_input_mask:
A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A : Dict = None
if self.use_token_type_ids:
A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Tuple = None
A : str = None
A : Any = None
if self.use_labels:
A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
A : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : int = TFMobileBertModel(config=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : List[str] = model(SCREAMING_SNAKE_CASE )
A : str = [input_ids, input_mask]
A : List[str] = model(SCREAMING_SNAKE_CASE )
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE )
A : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : str = TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE )
A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : str = self.num_labels
A : Tuple = TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE )
A : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Dict = self.num_choices
A : Dict = TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
A : Tuple = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Union[str, Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Dict = self.num_labels
A : Optional[Any] = TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
A : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Any = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[str] = self.prepare_config_and_inputs()
( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Any = config_and_inputs
A : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = TFMobileBertModelTest.TFMobileBertModelTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
A : Optional[int] = TFMobileBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_tf
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
A : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
A : str = model(SCREAMING_SNAKE_CASE )[0]
A : Dict = [1, 6, 30522]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Union[str, Any] = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
| 3
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__snake_case : Optional[int] = """sshleifer/mar_enro_6_3_student"""
class A__(a_ ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Tuple:
super().setUp()
a_ : Union[str, Any] = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=_lowercase , )
a_ : Union[str, Any] = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
MarianMTModel.from_pretrained(_lowercase )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> int:
a_ : Any = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
a_ : List[str] = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
a_ : Dict = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
a_ : Optional[int] = bash_script.replace(_lowercase , str(_lowercase ) )
a_ : int = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
a_ : Dict = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
a_ : Union[str, Any] = ["""finetune.py"""] + bash_script.split() + args
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : Optional[Any] = argparse.ArgumentParser()
a_ : Tuple = pl.Trainer.add_argparse_args(_lowercase )
a_ : Any = SummarizationModule.add_model_specific_args(_lowercase , os.getcwd() )
a_ : str = parser.parse_args()
a_ : Union[str, Any] = main(_lowercase )
# Check metrics
a_ : Any = load_json(model.metrics_save_path )
a_ : List[Any] = metrics["""val"""][0]
a_ : Union[str, Any] = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Optional[Any] = os.listdir(_lowercase )
a_ : Dict = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : str = os.path.join(args.output_dir , _lowercase )
a_ : Any = torch.load(_lowercase , map_location="""cpu""" )
a_ : Union[str, Any] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a_ : List[Any] = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class A__(a_ ):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Tuple = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
a_ : str = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
a_ : Union[str, Any] = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
a_ : Union[str, Any] = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
a_ : Any = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
a_ : Dict = bash_script.replace(_lowercase , str(_lowercase ) )
a_ : int = self.get_auto_remove_tmp_dir()
a_ : Optional[Any] = bash_script.replace("""--fp16""" , """""" )
a_ : List[str] = 6
a_ : str = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(_lowercase , """argv""" , _lowercase ):
a_ : int = argparse.ArgumentParser()
a_ : Any = pl.Trainer.add_argparse_args(_lowercase )
a_ : str = SummarizationDistiller.add_model_specific_args(_lowercase , os.getcwd() )
a_ : Any = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
a_ : Dict = distill_main(_lowercase )
# Check metrics
a_ : Any = load_json(model.metrics_save_path )
a_ : int = metrics["""val"""][0]
a_ : Union[str, Any] = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # fails if the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , _lowercase )
# check lightning ckpt can be loaded and has a reasonable statedict
a_ : Dict = os.listdir(_lowercase )
a_ : List[Any] = [x for x in contents if x.endswith(""".ckpt""" )][0]
a_ : int = os.path.join(args.output_dir , _lowercase )
a_ : Union[str, Any] = torch.load(_lowercase , map_location="""cpu""" )
a_ : List[str] = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a_ : List[str] = {os.path.basename(_lowercase ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 248
| 0
|
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_lowerCAmelCase : Any = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_lowerCAmelCase : Union[str, Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print("""\n""".join(upper_files) + """\n""")
_lowerCAmelCase : Optional[int] = [file for file in filepaths if """ """ in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print("""\n""".join(space_files) + """\n""")
_lowerCAmelCase : str = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print("""\n""".join(hyphen_files) + """\n""")
_lowerCAmelCase : Tuple = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print("""\n""".join(nodir_files) + """\n""")
_lowerCAmelCase : List[Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 298
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = "mock-s3-bucket"
UpperCAmelCase__ : Any = f's3://{mock_bucket}'
UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case )
assert dataset_path.startswith("s3://" ) is False
UpperCAmelCase__ : str = "./local/path"
UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is True
UpperCAmelCase__ : str = fsspec.filesystem("file" )
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case )
UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case )
assert isinstance(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case )
UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
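
# Note on the chained-URL syntax exercised above (archive path is illustrative):
# fsspec resolves "<protocol>://<member>::<archive>", e.g.
# "gzip://dataset.jsonl::/tmp/dataset.jsonl.gz", by opening the archive with the
# outer protocol and exposing the member file through the resulting filesystem.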
| 298
| 1
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """Returns the sum of the perimeters of all almost equilateral triangles with
    integral side lengths and area whose perimeter does not exceed max_perimeter
    (Project Euler problem 94)."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'{solution() = }')
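
# Sanity check (worked by hand): the first two almost equilateral triangles with
# integral side lengths and area are (5, 5, 6) and (17, 17, 16), with perimeters
# 16 and 50, so solution(100) should return 16 + 50 = 66.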
| 86
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
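
# Note: the @slow tests above are skipped by default; in the transformers test
# suite they are enabled with the RUN_SLOW=1 environment variable
# (e.g. RUN_SLOW=1 pytest tests/models/convbert).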
| 86
| 1
|
import os
def solution() -> str:
    """Returns the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
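
# num.txt holds the one hundred 50-digit numbers from Project Euler problem 13,
# one per line; the answer is the first ten digits of their sum.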
| 359
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
_lowercase : Tuple =argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
_lowercase : Dict =parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
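
# Example invocation (paths are illustrative):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch \
#       --tf_checkpoint_path ./transfo_xl_model.ckpt \
#       --transfo_xl_config_file ./transfo_xl_config.json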
| 266
| 0
|
from __future__ import annotations
import math
UpperCAmelCase : Any = """2020.9.26"""
UpperCAmelCase : Optional[Any] = """xcodz-dot, cclaus, dhruvmanila"""
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3d point to a 2d drawable point using a simple perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotates a point around the given axis by the given angle (in degrees)."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }""")
| 95
|
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 290
| 0
|
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
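
# Worked example from the problem statement: "COLIN" has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53 and is the 938th name in sorted order, so it
# contributes 938 * 53 = 49714 to the total.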
| 352
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, return_dict=None, output_hidden_states=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict)

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
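
# Minimal usage sketch (the checkpoint name below is hypothetical):
#   config = RobertaSeriesConfig.from_pretrained("some/alt-diffusion-text-encoder")
#   model = RobertaSeriesModelWithTransformation(config)
#   out = model(input_ids=input_ids, attention_mask=attention_mask)
#   out.projection_state  # shape: (batch, seq_len, config.project_dim)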
| 294
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
__A : List[str] = input("Enter Video/IGTV url: ").strip()
__A : Optional[int] = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 120
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 120
| 1
|
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
UpperCAmelCase = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
UpperCAmelCase = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
UpperCAmelCase = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
UpperCAmelCase = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
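
# The estimator above implements the unbiased pass@k from "Evaluating Large
# Language Models Trained on Code": pass@k = 1 - C(n - c, k) / C(n, k),
# computed stably as 1 - prod_{i = n-c+1}^{n} (1 - k / i).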
| 172
| 1
|
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(key: int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
most_likely_cipher = min(
chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
most_likely_cipher_chi_squared_value , decoded_most_likely_cipher = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
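# A minimal sketch of the chi-squared scoring idea behind the function above,
# assuming lowercase text. Note that the standard statistic compares observed
# counts against frequency * total_letters; the loop above instead derives
# `expected` from the letter's own occurrence count, a quirk kept from the
# source algorithm.
def chi_squared_score(text: str, frequencies: dict[str, float]) -> float:
    total = sum(text.count(letter) for letter in frequencies)
    score = 0.0
    for letter, freq in frequencies.items():
        observed = text.count(letter)
        expected = freq * total
        if expected > 0:
            score += (observed - expected) ** 2 / expected
    return score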
| 144
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def UpperCAmelCase__ ( self : List[str] , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
score = f1_score(
references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
return {"f1": float(score ) if score.size == 1 else score}
| 144
| 1
|
def harmonic_series ( n_term : str ) -> list:
'''simple docstring'''
if n_term == "":
return []
series : list = []
for temp in range(int(n_term ) ):
series.append(F"""1/{temp + 1}""" if series else """1""" )
return series
if __name__ == "__main__":
nth_term = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
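# Quick check of the function above: the first term is rendered as "1" and
# every later term as "1/k".
assert harmonic_series(4) == ["1", "1/2", "1/3", "1/4"]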
| 368
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE_ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE_ = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE_ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE_ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 3 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : Optional[str] = "relu" ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.Conv2d(
lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=lowerCamelCase__ ,stride=lowerCamelCase__ ,padding=kernel_size // 2 ,groups=lowerCamelCase__ ,bias=lowerCamelCase__ ,)
SCREAMING_SNAKE_CASE = nn.BatchNorm2d(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = ACT2FN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ,lowerCamelCase__ : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.convolution(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.normalization(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,lowerCamelCase__ : RegNetConfig ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
SCREAMING_SNAKE_CASE = config.num_channels
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values matches the one set in the configuration.""" )
SCREAMING_SNAKE_CASE = self.embedder(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 2 ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.Conv2d(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,stride=lowerCamelCase__ ,bias=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = nn.BatchNorm2d(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.convolution(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.normalization(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> int:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPool2d((1, 1) )
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Conv2d(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ) ,nn.ReLU() ,nn.Conv2d(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ) ,nn.Sigmoid() ,)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.pooler(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.attention(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_state * attention
return hidden_state
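# The block above is a squeeze-and-excitation gate: global-average-pool to a
# per-channel descriptor, squeeze through a 1x1-conv bottleneck, and rescale
# the input channels. A standalone sketch (not the RegNet class itself):
import torch
import torch.nn as nn

class SqueezeExcite(nn.Module):
    def __init__(self, channels: int, reduced: int):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.gate = nn.Sequential(
            nn.Conv2d(channels, reduced, kernel_size=1), nn.ReLU(),
            nn.Conv2d(reduced, channels, kernel_size=1), nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.gate(self.pool(x))  # per-channel rescaling

assert SqueezeExcite(64, 16)(torch.randn(2, 64, 7, 7)).shape == (2, 64, 7, 7)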
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
RegNetShortCut(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,groups=lowerCamelCase__ ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = ACT2FN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.shortcut(lowerCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 1 ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE = max(1 ,out_channels // config.groups_width )
SCREAMING_SNAKE_CASE = (
RegNetShortCut(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE = nn.Sequential(
RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,groups=lowerCamelCase__ ,activation=config.hidden_act ) ,RegNetSELayer(lowerCamelCase__ ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(lowerCamelCase__ ,lowerCamelCase__ ,kernel_size=1 ,activation=lowerCamelCase__ ) ,)
SCREAMING_SNAKE_CASE = ACT2FN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hidden_state
SCREAMING_SNAKE_CASE = self.layer(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.shortcut(lowerCamelCase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE = self.activation(lowerCamelCase__ )
return hidden_state
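# Both layer types above follow the same residual template:
# out = act(body(x) + shortcut(x)), where the shortcut is a strided 1x1 conv
# only when the channel count or spatial size changes. A minimal sketch:
import torch
import torch.nn as nn

class Residual(nn.Module):
    def __init__(self, c_in: int, c_out: int, stride: int = 1):
        super().__init__()
        self.body = nn.Conv2d(c_in, c_out, 3, stride=stride, padding=1)
        self.shortcut = (
            nn.Conv2d(c_in, c_out, 1, stride=stride)
            if (c_in != c_out or stride != 1)
            else nn.Identity()
        )
        self.act = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.body(x) + self.shortcut(x))

assert Residual(32, 64, stride=2)(torch.randn(1, 32, 8, 8)).shape == (1, 64, 4, 4)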
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : int ,lowerCamelCase__ : RegNetConfig ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int = 2 ,lowerCamelCase__ : int = 2 ,) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
SCREAMING_SNAKE_CASE = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,stride=lowerCamelCase__ ,) ,*[layer(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for _ in range(depth - 1 )] ,)
def SCREAMING_SNAKE_CASE__ ( self : Dict ,lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.layers(lowerCamelCase__ )
return hidden_state
class UpperCamelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : RegNetConfig ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
SCREAMING_SNAKE_CASE = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase__ ,config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,depth=lowerCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE = stage_module(lowerCamelCase__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ ,hidden_states=lowerCamelCase__ )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = RegNetConfig
__snake_case : Union[str, Any] = "regnet"
__snake_case : Optional[Any] = "pixel_values"
__snake_case : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
if isinstance(lowerCamelCase__ ,nn.Conv2d ):
nn.init.kaiming_normal_(module.weight ,mode="""fan_out""" ,nonlinearity="""relu""" )
elif isinstance(lowerCamelCase__ ,(nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : str=False ) -> str:
'''simple docstring'''
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE_ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : str ,lowerCamelCase__ : str ) -> Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = RegNetEmbeddings(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = RegNetEncoder(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=lowerCamelCase__ ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Tensor ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.embedder(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.encoder(
lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,return_dict=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = encoder_outputs[0]
SCREAMING_SNAKE_CASE = self.pooler(lowerCamelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ ,pooler_output=lowerCamelCase__ ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Any ,lowerCamelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = config.num_labels
SCREAMING_SNAKE_CASE = RegNetModel(lowerCamelCase__ )
# classification head
SCREAMING_SNAKE_CASE = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=lowerCamelCase__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def SCREAMING_SNAKE_CASE__ ( self : str ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[torch.LongTensor] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE = self.regnet(lowerCamelCase__ ,output_hidden_states=lowerCamelCase__ ,return_dict=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE = self.classifier(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
SCREAMING_SNAKE_CASE = loss_fct(lowerCamelCase__ ,lowerCamelCase__ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE = CrossEntropyLoss()
SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE = loss_fct(lowerCamelCase__ ,lowerCamelCase__ )
if not return_dict:
SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase__ ,logits=lowerCamelCase__ ,hidden_states=outputs.hidden_states )
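# A minimal sketch of the problem_type dispatch above, with hypothetical
# shapes (batch of 4, 3 labels): each loss expects a different label dtype.
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

logits = torch.randn(4, 3)
assert CrossEntropyLoss()(logits, torch.randint(0, 3, (4,))).ndim == 0              # single_label_classification
assert BCEWithLogitsLoss()(logits, torch.randint(0, 2, (4, 3)).float()).ndim == 0  # multi_label_classification
assert MSELoss()(logits, torch.randn(4, 3)).ndim == 0                               # regression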
| 193
| 0
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase_ ( ProcessorMixin ):
attributes = ["image_processor", "tokenizer"]
image_processor_class = "BlipImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__( self , image_processor , tokenizer , qformer_tokenizer ) -> Optional[int]:
super().__init__(image_processor , tokenizer )
# add QFormer tokenizer
lowercase__ : Dict = qformer_tokenizer
def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
lowercase__ : List[Any] = BatchFeature()
if text is not None:
text_encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
encoding.update(text_encoding )
qformer_text_encoding = self.qformer_tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
encoding["qformer_input_ids"] = qformer_text_encoding.pop('input_ids' )
encoding["qformer_attention_mask"] = qformer_text_encoding.pop('attention_mask' )
if images is not None:
image_encoding = self.image_processor(images , return_tensors=return_tensors )
encoding.update(image_encoding )
return encoding
def _UpperCAmelCase ( self , *args , **kwargs ) -> List[str]:
return self.tokenizer.batch_decode(*args , **kwargs )
def _UpperCAmelCase ( self , *args , **kwargs ) -> Tuple:
return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = self.tokenizer.model_input_names
lowercase__ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _UpperCAmelCase ( self , save_directory , **kwargs ) -> Optional[int]:
if os.path.isfile(save_directory ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(save_directory , exist_ok=True )
qformer_tokenizer_path = os.path.join(save_directory , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
return super().save_pretrained(save_directory , **kwargs )
@classmethod
def _UpperCAmelCase ( cls , pretrained_model_name_or_path , **kwargs ) -> str:
qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder='qformer_tokenizer' )
args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
args.append(qformer_tokenizer )
return cls(*args )
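# Hypothetical usage sketch of the processor above (the checkpoint id and the
# image file name are illustrative, not taken from this file):
# from transformers import InstructBlipProcessor
# from PIL import Image
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=Image.open("example.jpg"), text="What is shown?", return_tensors="pt")
# `inputs` then holds pixel_values from the image processor, input_ids and
# attention_mask from the main tokenizer, and qformer_input_ids and
# qformer_attention_mask from the second tokenizer handled above.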
| 77
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : Dict = ["image_processor", "tokenizer"]
lowerCamelCase__ : Dict = "BlipImageProcessor"
lowerCamelCase__ : Union[str, Any] = "AutoTokenizer"
def __init__( self , a , a , a ) -> Optional[int]:
super().__init__(a , a )
# add QFormer tokenizer
lowercase__ : Dict = qformer_tokenizer
def __call__( self , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = False , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
lowercase__ : List[Any] = BatchFeature()
if text is not None:
lowercase__ : Optional[int] = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
encoding.update(a )
lowercase__ : Optional[int] = self.qformer_tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
lowercase__ : List[str] = qformer_text_encoding.pop('input_ids' )
lowercase__ : Any = qformer_text_encoding.pop('attention_mask' )
if images is not None:
lowercase__ : List[Any] = self.image_processor(a , return_tensors=a )
encoding.update(a )
return encoding
def _UpperCAmelCase ( self , *a , **a ) -> List[str]:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self , *a , **a ) -> Tuple:
return self.tokenizer.decode(*a , **a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCAmelCase ( self ) -> Union[str, Any]:
lowercase__ : str = self.tokenizer.model_input_names
lowercase__ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _UpperCAmelCase ( self , a , **a ) -> Optional[int]:
if os.path.isfile(a ):
raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(a , exist_ok=a )
lowercase__ : int = os.path.join(a , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(a )
return super().save_pretrained(a , **a )
@classmethod
def _UpperCAmelCase ( cls , a , **a ) -> str:
lowercase__ : str = AutoTokenizer.from_pretrained(a , subfolder='qformer_tokenizer' )
lowercase__ : int = cls._get_arguments_from_pretrained(a , **a )
args.append(a )
return cls(*a )
| 77
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class _lowercase ( snake_case_ ):
lowercase = 42
class _lowercase ( nn.Module ):
def __init__( self : Any , snake_case : str=3 , snake_case : Any=3 , snake_case : List[Any]=("DownEncoderBlock2D",) , snake_case : int=(6_4,) , snake_case : str=2 , snake_case : Dict=3_2 , snake_case : str="silu" , snake_case : Optional[Any]=True , ) -> int:
"""simple docstring"""
super().__init__()
UpperCamelCase_ : str = layers_per_block
UpperCamelCase_ : Dict = torch.nn.Conv2d(
snake_case , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase_ : Dict = None
UpperCamelCase_ : Union[str, Any] = nn.ModuleList([] )
# down
UpperCamelCase_ : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(snake_case ):
UpperCamelCase_ : Optional[int] = output_channel
UpperCamelCase_ : Union[str, Any] = block_out_channels[i]
UpperCamelCase_ : Optional[int] = i == len(snake_case ) - 1
UpperCamelCase_ : Optional[Any] = get_down_block(
snake_case , num_layers=self.layers_per_block , in_channels=snake_case , out_channels=snake_case , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=snake_case , resnet_groups=snake_case , attention_head_dim=snake_case , temb_channels=snake_case , )
self.down_blocks.append(snake_case )
# mid
UpperCamelCase_ : int = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case , output_scale_factor=1 , resnet_time_scale_shift='default' , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case , temb_channels=snake_case , )
# out
UpperCamelCase_ : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case , eps=1e-6 )
UpperCamelCase_ : Any = nn.SiLU()
UpperCamelCase_ : List[str] = 2 * out_channels if double_z else out_channels
UpperCamelCase_ : List[Any] = nn.Conv2d(block_out_channels[-1] , snake_case , 3 , padding=1 )
UpperCamelCase_ : Any = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : List[Any] = x
UpperCamelCase_ : Tuple = self.conv_in(snake_case )
if self.training and self.gradient_checkpointing:
def create_custom_forward(snake_case : List[Any] ):
def custom_forward(*snake_case : List[Any] ):
return module(*snake_case )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
UpperCamelCase_ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case ) , snake_case , use_reentrant=snake_case )
# middle
UpperCamelCase_ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case , use_reentrant=snake_case )
else:
for down_block in self.down_blocks:
UpperCamelCase_ : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case ) , snake_case )
# middle
UpperCamelCase_ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , snake_case )
else:
# down
for down_block in self.down_blocks:
UpperCamelCase_ : int = down_block(snake_case )
# middle
UpperCamelCase_ : Union[str, Any] = self.mid_block(snake_case )
# post-process
UpperCamelCase_ : Any = self.conv_norm_out(snake_case )
UpperCamelCase_ : int = self.conv_act(snake_case )
UpperCamelCase_ : List[str] = self.conv_out(snake_case )
return sample
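# A minimal sketch of the gradient-checkpointing pattern used in the forward
# pass above (the non-reentrant variant gated on PyTorch >= 1.11): the block's
# activations are recomputed during backward instead of being stored.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

block = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8))
x = torch.randn(4, 8, requires_grad=True)
y = checkpoint(block, x, use_reentrant=False)  # recompute in backward
y.sum().backward()
assert x.grad is not None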
class _lowercase ( nn.Module ):
def __init__( self : int , snake_case : List[str]=3 , snake_case : Union[str, Any]=3 , snake_case : List[str]=("UpDecoderBlock2D",) , snake_case : str=(6_4,) , snake_case : Union[str, Any]=2 , snake_case : Optional[int]=3_2 , snake_case : str="silu" , snake_case : List[str]="group" , ) -> int:
"""simple docstring"""
super().__init__()
UpperCamelCase_ : List[str] = layers_per_block
UpperCamelCase_ : Optional[Any] = nn.Conv2d(
snake_case , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCamelCase_ : List[Any] = None
UpperCamelCase_ : Dict = nn.ModuleList([] )
UpperCamelCase_ : Tuple = in_channels if norm_type == 'spatial' else None
# mid
UpperCamelCase_ : Optional[Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=snake_case , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case , temb_channels=snake_case , )
# up
UpperCamelCase_ : int = list(reversed(snake_case ) )
UpperCamelCase_ : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(snake_case ):
UpperCamelCase_ : Tuple = output_channel
UpperCamelCase_ : Any = reversed_block_out_channels[i]
UpperCamelCase_ : Optional[Any] = i == len(snake_case ) - 1
UpperCamelCase_ : Optional[int] = get_up_block(
snake_case , num_layers=self.layers_per_block + 1 , in_channels=snake_case , out_channels=snake_case , prev_output_channel=snake_case , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=snake_case , resnet_groups=snake_case , attention_head_dim=snake_case , temb_channels=snake_case , resnet_time_scale_shift=snake_case , )
self.up_blocks.append(snake_case )
UpperCamelCase_ : Dict = output_channel
# out
if norm_type == "spatial":
UpperCamelCase_ : Dict = SpatialNorm(block_out_channels[0] , snake_case )
else:
UpperCamelCase_ : str = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case , eps=1e-6 )
UpperCamelCase_ : List[Any] = nn.SiLU()
UpperCamelCase_ : str = nn.Conv2d(block_out_channels[0] , snake_case , 3 , padding=1 )
UpperCamelCase_ : int = False
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Any , snake_case : Union[str, Any]=None ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = z
UpperCamelCase_ : Tuple = self.conv_in(snake_case )
UpperCamelCase_ : Tuple = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(snake_case : Any ):
def custom_forward(*snake_case : Optional[int] ):
return module(*snake_case )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
UpperCamelCase_ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case , snake_case , use_reentrant=snake_case )
UpperCamelCase_ : Any = sample.to(snake_case )
# up
for up_block in self.up_blocks:
UpperCamelCase_ : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(snake_case ) , snake_case , snake_case , use_reentrant=snake_case )
else:
# middle
UpperCamelCase_ : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , snake_case , snake_case )
UpperCamelCase_ : List[str] = sample.to(snake_case )
# up
for up_block in self.up_blocks:
UpperCamelCase_ : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case ) , snake_case , snake_case )
else:
# middle
UpperCamelCase_ : List[Any] = self.mid_block(snake_case , snake_case )
UpperCamelCase_ : List[Any] = sample.to(snake_case )
# up
for up_block in self.up_blocks:
UpperCamelCase_ : int = up_block(snake_case , snake_case )
# post-process
if latent_embeds is None:
UpperCamelCase_ : List[str] = self.conv_norm_out(snake_case )
else:
UpperCamelCase_ : Tuple = self.conv_norm_out(snake_case , snake_case )
UpperCamelCase_ : Optional[Any] = self.conv_act(snake_case )
UpperCamelCase_ : Optional[int] = self.conv_out(snake_case )
return sample
class _lowercase ( nn.Module ):
def __init__( self : int , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple , snake_case : List[str]=None , snake_case : Dict="random" , snake_case : Dict=False , snake_case : Dict=True ) -> Dict:
"""simple docstring"""
super().__init__()
UpperCamelCase_ : Tuple = n_e
UpperCamelCase_ : Any = vq_embed_dim
UpperCamelCase_ : Optional[Any] = beta
UpperCamelCase_ : List[str] = legacy
UpperCamelCase_ : List[str] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCamelCase_ : int = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
UpperCamelCase_ : Any = self.used.shape[0]
UpperCamelCase_ : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCamelCase_ : int = self.re_embed
UpperCamelCase_ : Any = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices." )
else:
UpperCamelCase_ : Optional[Any] = n_e
UpperCamelCase_ : Any = sane_index_shape
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : Tuple = inds.shape
assert len(snake_case ) > 1
UpperCamelCase_ : List[str] = inds.reshape(ishape[0] , -1 )
UpperCamelCase_ : Optional[int] = self.used.to(snake_case )
UpperCamelCase_ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
UpperCamelCase_ : Tuple = match.argmax(-1 )
UpperCamelCase_ : List[str] = match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCamelCase_ : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCamelCase_ : Optional[Any] = self.unknown_index
return new.reshape(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Tuple = inds.shape
assert len(snake_case ) > 1
UpperCamelCase_ : int = inds.reshape(ishape[0] , -1 )
UpperCamelCase_ : str = self.used.to(snake_case )
if self.re_embed > self.used.shape[0]: # extra token
UpperCamelCase_ : Optional[int] = 0 # simply set to zero
UpperCamelCase_ : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case )
return back.reshape(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : str = z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCamelCase_ : List[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCamelCase_ : List[str] = torch.argmin(torch.cdist(snake_case , self.embedding.weight ) , dim=1 )
UpperCamelCase_ : Optional[Any] = self.embedding(snake_case ).view(z.shape )
UpperCamelCase_ : int = None
UpperCamelCase_ : List[str] = None
# compute loss for embedding
if not self.legacy:
UpperCamelCase_ : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCamelCase_ : Optional[Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCamelCase_ : Any = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCamelCase_ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCamelCase_ : Tuple = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCamelCase_ : int = self.remap_to_used(snake_case )
UpperCamelCase_ : Optional[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCamelCase_ : Union[str, Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.remap is not None:
UpperCamelCase_ : Any = indices.reshape(shape[0] , -1 ) # add batch axis
UpperCamelCase_ : Optional[int] = self.unmap_to_all(snake_case )
UpperCamelCase_ : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCamelCase_ : List[str] = self.embedding(snake_case )
if shape is not None:
UpperCamelCase_ : Tuple = z_q.view(snake_case )
# reshape back to match original input shape
UpperCamelCase_ : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
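# A minimal sketch of the codebook lookup at the heart of the quantizer above:
# each flattened latent snaps to its nearest embedding row (L2 via cdist), and
# the straight-through estimator keeps gradients flowing to the encoder.
import torch

codebook = torch.randn(16, 4)   # n_e = 16 entries of dimension vq_embed_dim = 4
z = torch.randn(10, 4)          # flattened latents
indices = torch.cdist(z, codebook).argmin(dim=1)
z_q = codebook[indices]
z_q = z + (z_q - z).detach()    # forward uses z_q, backward sees identity
assert z_q.shape == z.shape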
class _lowercase ( snake_case_ ):
def __init__( self : Optional[int] , snake_case : Tuple , snake_case : str=False ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = parameters
UpperCamelCase_, UpperCamelCase_ : Optional[int] = torch.chunk(snake_case , 2 , dim=1 )
UpperCamelCase_ : Dict = torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCamelCase_ : List[str] = deterministic
UpperCamelCase_ : Any = torch.exp(0.5 * self.logvar )
UpperCamelCase_ : Union[str, Any] = torch.exp(self.logvar )
if self.deterministic:
UpperCamelCase_ : str = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = randn_tensor(
self.mean.shape , generator=snake_case , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCamelCase_ : Dict = self.mean + self.std * sample
return x
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Union[str, Any]=None ) -> List[Any]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Union[str, Any] , snake_case : str=[1, 2, 3] ) -> int:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
UpperCamelCase_ : Dict = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
return self.mean
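# A sketch of the reparameterization trick implemented by sample() above:
# draw x = mean + std * eps with eps ~ N(0, I) so gradients reach mean/logvar,
# and the other=None branch of kl() reduces to KL(N(mean, var) || N(0, I)).
import torch

mean = torch.zeros(2, 4, requires_grad=True)
logvar = torch.zeros(2, 4, requires_grad=True)
std = torch.exp(0.5 * logvar)
x = mean + std * torch.randn_like(std)
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=1)
(x.sum() + kl.sum()).backward()
assert mean.grad is not None and logvar.grad is not None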
| 50
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
lowercase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowercase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Dict=False ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
UpperCamelCase_ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class _lowercase ( snake_case_ ):
def __init__( self : Tuple , snake_case : Optional[int] , snake_case : Optional[Any]=1_3 , snake_case : Optional[Any]=7 , snake_case : Any=True , snake_case : Optional[int]=True , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=True , snake_case : List[Any]=9_9 , snake_case : int=3_2 , snake_case : str=3_2 , snake_case : str=2 , snake_case : List[Any]=4 , snake_case : Tuple=3_7 , snake_case : Any="gelu" , snake_case : str=0.1 , snake_case : Tuple=0.1 , snake_case : Optional[Any]=5_1_2 , snake_case : Optional[int]=1_6 , snake_case : List[Any]=2 , snake_case : Dict=0.02 , snake_case : List[str]=3 , snake_case : Any=4 , snake_case : Any=None , ) -> int:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = parent
UpperCamelCase_ : Any = batch_size
UpperCamelCase_ : List[str] = seq_length
UpperCamelCase_ : List[Any] = is_training
UpperCamelCase_ : Optional[Any] = use_input_mask
UpperCamelCase_ : Tuple = use_token_type_ids
UpperCamelCase_ : Optional[int] = use_labels
UpperCamelCase_ : Dict = vocab_size
UpperCamelCase_ : Dict = hidden_size
UpperCamelCase_ : List[str] = num_hidden_layers
UpperCamelCase_ : Tuple = num_attention_heads
UpperCamelCase_ : Optional[int] = intermediate_size
UpperCamelCase_ : int = hidden_act
UpperCamelCase_ : List[str] = hidden_dropout_prob
UpperCamelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase_ : Tuple = max_position_embeddings
UpperCamelCase_ : Tuple = type_vocab_size
UpperCamelCase_ : Optional[Any] = type_sequence_label_size
UpperCamelCase_ : Any = initializer_range
UpperCamelCase_ : Tuple = num_labels
UpperCamelCase_ : Tuple = num_choices
UpperCamelCase_ : Tuple = scope
UpperCamelCase_ : Dict = embedding_size
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCamelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCamelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ : Optional[int] = None
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : Dict = None
if self.use_labels:
UpperCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ : Union[str, Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case : str , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : str = TFMobileBertModel(config=snake_case )
UpperCamelCase_ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Union[str, Any] = model(snake_case )
UpperCamelCase_ : Optional[Any] = [input_ids, input_mask]
UpperCamelCase_ : List[Any] = model(snake_case )
UpperCamelCase_ : Union[str, Any] = model(snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForMaskedLM(config=snake_case )
UpperCamelCase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : Any , snake_case : int , snake_case : int , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = TFMobileBertForNextSentencePrediction(config=snake_case )
UpperCamelCase_ : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : List[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : int , snake_case : str , snake_case : str , snake_case : Any , snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : List[str] = TFMobileBertForPreTraining(config=snake_case )
UpperCamelCase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Any = model(snake_case )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Dict , snake_case : List[str] , snake_case : str , snake_case : List[str] , snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.num_labels
UpperCamelCase_ : Dict = TFMobileBertForSequenceClassification(config=snake_case )
UpperCamelCase_ : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : List[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Tuple , snake_case : Any , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.num_choices
UpperCamelCase_ : Dict = TFMobileBertForMultipleChoice(config=snake_case )
UpperCamelCase_ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : int = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : List[str] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
UpperCamelCase_ : Optional[Any] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
UpperCamelCase_ : int = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : Tuple , snake_case : str , snake_case : str , snake_case : Optional[int] , snake_case : str , snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Any = self.num_labels
UpperCamelCase_ : Optional[Any] = TFMobileBertForTokenClassification(config=snake_case )
UpperCamelCase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Tuple , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = TFMobileBertForQuestionAnswering(config=snake_case )
UpperCamelCase_ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
UpperCamelCase_ : Tuple = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
UpperCamelCase_ : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
UpperCamelCase_ : str = ConfigTester(self , config_class=snake_case , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
"""simple docstring"""
UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Tuple:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
UpperCamelCase_ : Optional[Any] = TFMobileBertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 50
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        '''simple docstring'''
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        # the timestep loop is intentionally run a second time
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        '''simple docstring'''
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
    def test_inference_steps(self):
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
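# Note (illustrative): full_loop() above runs 10 IPNDM steps on the deterministic
# dummy sample; the expected mean absolute value 2540529 is pinned with a
# tolerance of 10 to absorb accumulated floating-point error across platforms.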
| 79
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
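# Illustrative CLI usage (the script file name is an assumption, not part of the source):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# Casting every tensor with .half() roughly halves the checkpoint size on disk.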
| 159
| 0
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
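# Worked example (illustrative): the impedance triangle is Z**2 = R**2 + X**2, so
#   electrical_impedance(3, 4, 0)  ->  {'impedance': 5.0}
#   electrical_impedance(0, 4, 5)  ->  {'resistance': 3.0}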
| 351
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
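# Note: with this lazy-import pattern, `import ...swiftformer` stays cheap;
# `_LazyModule` resolves the `_import_structure` entries (and therefore imports
# torch) only when an attribute such as `SwiftFormerModel` is first accessed.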
| 87
| 0
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs):
        """simple docstring"""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs)
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        """simple docstring"""
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids):
        """simple docstring"""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text):
        """simple docstring"""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        """simple docstring"""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """simple docstring"""
        return ()
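# Illustrative round trip for the byte-level tokenizer above (`tok` is a
# hypothetical instance): with 3 fixed special tokens, byte 104 ('h') maps to
# id 104 + 3 = 107, so
#   tok._tokenize("hi")                          -> ['h', 'i']
#   [tok._convert_token_to_id(c) for c in "hi"]  -> [107, 108]
#   tok.convert_tokens_to_string(['h', 'i'])     -> 'hi'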
| 67
|
'''simple docstring'''
def actual_power(a: int, b: int):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
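# Trace (illustrative): the exponent is halved at every recursion level, so
#   actual_power(2, 5) = 2 * actual_power(2, 2) * actual_power(2, 2)
#                      = 2 * 4 * 4 = 32
# and power(-2, -3) = 1 / actual_power(-2, -3) = 1 / -8 = -0.125.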
| 67
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer, label_column_id, max_seq_length=None):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'), batched=True)
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length'), batched=True)
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            ' --overwrite_output_dir to overcome.')
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(
        F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
        F'16-bits training: {training_args.fp16}')
    logger.info(F'Training/evaluation parameters {training_args}')
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(F' {key} = {value}')
                writer.write(F'{key} = {value}\n')
            results.update(result)
    return results
if __name__ == "__main__":
main()
| 361
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 304
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''shi-labs/dinat-mini-in1k-224''': '''https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json''',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''
    model_type = "dinat"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ) -> Any:
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
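# Illustrative: the defaults above describe the "mini" variant; with
# embed_dim=64 and len(depths)=4, hidden_size = 64 * 2**3 = 512 and
# stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"].
#   config = DinatConfig()  # usable with the Dinat models in transformers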
| 298
|
'''simple docstring'''
def wave(txt: str) -> list[str]:
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
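# Illustrative: one variant per alphabetic position, so wave("ab1c") returns
# ['Ab1c', 'aB1c', 'ab1C'] -- the digit contributes no variant.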
| 298
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """simple docstring"""
    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('<command> [<args>] ', '')
        return usage
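# Illustrative behaviour of the converters above: each maps a numeric menu
# choice onto an enum value, e.g.
#   _convert_distributed_mode("3")  ->  DistributedType.MULTI_GPU
#   _convert_yes_no_to_bool("YES")  ->  True
# while _ask_field keeps re-prompting until convert_value stops raising.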
| 117
|
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='chinese_remainder_theorem', verbose=True)
testmod(name='chinese_remainder_theorem2', verbose=True)
testmod(name='invert_modulo', verbose=True)
testmod(name='extended_euclid', verbose=True)
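# Worked example (illustrative): find x with x % 5 == 1 and x % 7 == 3.
#   extended_euclid(5, 7)                  -> (3, -2), since 5*3 + 7*(-2) == 1
#   chinese_remainder_theorem(5, 1, 7, 3)  -> 31, and 31 % 5 == 1, 31 % 7 == 3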
| 117
| 1
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
SCREAMING_SNAKE_CASE : Optional[int] = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
SCREAMING_SNAKE_CASE : List[str] = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
SCREAMING_SNAKE_CASE : Optional[int] = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels) -> float:
    return float((preds == labels).mean())
def acc_and_fa(preds, labels) -> dict:
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels) -> dict:
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
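# Illustrative check with numpy arrays (inputs must support elementwise ==):
#   import numpy as np
#   acc_and_fa(np.array([0, 1, 1]), np.array([0, 1, 0]))
#   -> {'accuracy': 0.666..., 'f1': 0.666...}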
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _lowerCamelCase( datasets.Metric ):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
}), codebase_urls=[], reference_urls=[], format='numpy', )
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
| 21
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase : List[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    '''simple docstring'''
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)
                with self.subTest('JIT Enabled'):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict['input_ids'], inputs_dict['attention_mask'])
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )
                with self.subTest('JIT Enabled'):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot-400M-distill')
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != 'cpu', '3B test too slow on CPU.')
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        TOK_DECODE_KW = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B', from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B')
        src_text = ['Sam']
        model_inputs = tokenizer(src_text, return_tensors='jax')
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 238
| 0
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 356
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
_SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(UpperCAmelCase_ ) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: Dict ):
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_SCREAMING_SNAKE_CASE = """lower"""
_SCREAMING_SNAKE_CASE = ["""low""", """er</w>"""]
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokens + ["""<unk>"""]
_SCREAMING_SNAKE_CASE = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: Union[str, Any]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
# Simple input
_SCREAMING_SNAKE_CASE = """This is a simple input"""
_SCREAMING_SNAKE_CASE = ["""This is a simple input 1""", """This is a simple input 2"""]
_SCREAMING_SNAKE_CASE = ("""This is a simple input""", """This is a pair""")
_SCREAMING_SNAKE_CASE = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(UpperCAmelCase_ , tokenizer_r.encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
UpperCAmelCase_ , tokenizer_r.batch_encode_plus , UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding="""max_length""" , )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __UpperCAmelCase (_UpperCAmelCase ):
pass
| 125
| 0
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that in the current board
        # (possible_board) there is no other queen in the same column, because if there
        # is it means there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45°: y - x = b or 45°: row - col = b
        # 135°: y + x = b or 135°: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
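
A quick way to sanity-check the collision rules above: a candidate queen at (row, col) clashes with an already-placed queen at (r, c) exactly when the columns match or one of the two diagonal invariants (row - col, row + col) matches. A minimal standalone sketch; the `conflicts` helper is written for this note and is not part of the file above:

def conflicts(placed: list[int], row: int, col: int) -> bool:
    """placed[r] holds the column of the queen already sitting in row r."""
    return any(
        col == c                   # same column (vertical collision)
        or row - col == r - c      # same 45° diagonal
        or row + col == r + c      # same 135° diagonal
        for r, c in enumerate(placed)
    )

assert conflicts([3], 1, 2)      # (0, 3) and (1, 2) share the 135° diagonal: 0 + 3 == 1 + 2
assert not conflicts([3], 1, 1)  # (1, 1) touches neither diagonal nor the column of (0, 3)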
| 214
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: "Matrix") -> "Matrix":
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> "Matrix":
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: "Matrix") -> "Matrix":
        return self + (-another)

    def __mul__(self, another) -> "Matrix":
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> "Matrix":
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: "Matrix", v: "Matrix"):
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1) has [[1, 0, 0], [0, 1, 0], [0, 0, 1]] as its values
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
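
`sherman_morrison` implements the Sherman-Morrison identity: when A is invertible and 1 + v^T A^(-1) u != 0, then (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), with `self` holding A^(-1). A quick numeric cross-check against a dense inverse; numpy is used only for this note and is not a dependency of the file above:

import numpy as np

a_inv = np.eye(3)                        # A = I, so A^(-1) = I, matching test1 above
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])
lhs = np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T)
rhs = a_inv - (a_inv @ u @ v.T @ a_inv) / (1.0 + (v.T @ a_inv @ u)[0, 0])
assert np.allclose(lhs, rhs)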
| 214
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
######################################
logger.info('''Step 1 - Create the dataset''' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('''Step 2 - Index the dataset''' )
######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
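
Once the passages and the FAISS index are on disk they can be reloaded and queried without recomputing anything. A minimal sketch of the retrieval side; the DPR question-encoder checkpoint and the variable names below are illustrative additions, not part of the script above:

from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk(passages_path)               # reload the passages
dataset.load_faiss_index("embeddings", index_path)    # reload the index

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

question = "What does Moses' rod turn into ?"
question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)
print(retrieved["title"])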
| 2
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main() -> None:
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
| 2
| 1
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs: str) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 172
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 172
| 1
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
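
A short usage sketch of the tree above (values picked arbitrarily for this note): `prefix(i)` sums indices 0..i-1, `query(left, right)` covers the half-open range [left, right), and `add` is a point update.

f = FenwickTree([1, 2, 3, 4, 5])
assert f.prefix(3) == 1 + 2 + 3        # sum over indices 0..2
assert f.query(1, 4) == 2 + 3 + 4      # half-open range [1, 4)
f.add(2, 10)                           # point update: a[2] += 10
assert f.get(2) == 13
assert f.get_array() == [1, 2, 13, 4, 5]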
| 351
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"Total count for various states are: {single_qubit_measure(1, 1)}")
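
Because no gate is applied before the measurement, the circuit above always reads |0>, so the histogram collapses to {'0': 1000}. To see genuinely probabilistic counts, put the qubit in superposition first; a minimal sketch of that variation, written for this note and not part of the file above:

def superposition_measure(shots: int = 1000) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(1, 1)
    circuit.h(0)  # Hadamard gate: |0> -> (|0> + |1>) / sqrt(2)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)  # roughly 50/50 between '0' and '1'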
| 262
| 0
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: complex, b: complex, c: complex) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
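
A quick worked check of the function above: for a = 5, b = 6, c = 1 the discriminant is 36 - 20 = 16, so the roots are (-6 ± 4) / 10, i.e. -0.2 and -1.0, while a negative discriminant yields complex roots because `cmath.sqrt` handles negative arguments:

assert quadratic_roots(5, 6, 1) == (-0.2, -1.0)
# x^2 + 1 = 0 has no real solutions; sqrt(-4) = 2j, so the roots come back complex
assert quadratic_roots(1, 0, 1) == (1j, -1j)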
| 195
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make
        # sure it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
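
The test above exercises the callback API end to end; in user code the same hooks plug into `Trainer` directly. A minimal sketch of a practical callback (the loss-printing behavior below is our own example, not from the test file):

from transformers import TrainerCallback


class LossPrinterCallback(TrainerCallback):
    """Print the training loss every time the Trainer logs."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs is not None and "loss" in logs:
            print(f"step {state.global_step}: loss = {logs['loss']:.4f}")

# trainer = Trainer(model, args, train_dataset=ds, callbacks=[LossPrinterCallback()])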
| 195
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
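
These `__init__.py` files all follow the same pattern: declare the import structure as plain strings, then swap the module object in `sys.modules` for a `_LazyModule` so heavy backends (torch, TF, Flax) load only on first attribute access. A stripped-down sketch of the same idea using a module-level `__getattr__` (PEP 562), independent of the transformers helper:

import importlib

_import_structure = {"heavy_submodule": ["ExpensiveClass"]}

def __getattr__(name: str):
    # Resolve the attribute lazily: import the owning submodule on first use
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")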
| 357
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 73
| 0
|
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
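
Two things do the heavy lifting in this script: moving every submodule to channels-last memory format (friendlier cache behavior for convolutions on CPU) and running the whole pipeline under bfloat16 autocast after `ipex.optimize` has fused and repacked the weights. The same recipe applies to any inference-only PyTorch module; a minimal sketch on a toy model (the model and shapes are illustrative only):

import intel_extension_for_pytorch as ipex
import torch

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
model = model.to(memory_format=torch.channels_last)
model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True)

x = torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last)
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16), torch.no_grad():
    y = model(x)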
| 348
| 1
|
'''simple docstring'''
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid function of a float, or its derivative if deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
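
Note the convention in `sigmoid_function`: with deriv=True it expects the already-activated output, relying on the identity σ'(x) = σ(x)·(1 - σ(x)). A quick finite-difference check of that identity, written for this note:

import math

x = 0.7
s = 1 / (1 + math.exp(-x))
analytic = s * (1 - s)  # what sigmoid_function(s, deriv=True) computes
h = 1e-6
numeric = ((1 / (1 + math.exp(-(x + h)))) - s) / h
assert abs(analytic - numeric) < 1e-4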
| 332
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
"roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 332
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 52
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 331
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 131
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
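
Shifting right until the value reaches zero makes the result equal to the bit length of the input, i.e. the 1-based index of its most significant set bit (0 for an input of 0):

assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(1) == 1   # 0b1
assert get_highest_set_bit_position(8) == 4   # 0b1000
assert get_highest_set_bit_position(8) == (8).bit_length()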
| 131
| 1
|
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCAmelCase_ = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls) -> None:
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls) -> None:
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
UpperCamelCase__ : Optional[Any] = FlaxBertModel(_A )
model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token )
UpperCamelCase__ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCamelCase__ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1E-3, msg=f"{key} not identical" )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A, repo_id='''valid_org/test-model-flax-org''', push_to_hub=_A, use_auth_token=self._token )
UpperCamelCase__ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCamelCase__ : Dict = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A, 1E-3, msg=f"{key} not identical" )
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)
        self.assertIsNotNone(model)
| 201
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 304
| 0
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 143
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict {"xmin": xmin, ...}."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
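# Illustrative usage (a sketch, not part of the original module; the checkpoint
# below is one public example, not prescribed by this pipeline):
# from transformers import pipeline
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", threshold=0.9)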
| 143
| 1
|
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via the Fast Fourier Transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root

            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]

        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1

            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root

            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove trailing zero coefficients
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
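# Illustrative usage (a sketch, not part of the original module): multiplying
# A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x should give 4 + 13x + 22x^2 + 15x^3.
if __name__ == "__main__":
    product = FFT(poly_a=[1, 2, 3], poly_b=[4, 5]).product
    print(product)  # coefficients of 4 + 13x + 22x^2 + 15x^3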
| 207
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" ,)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"""No mask_token ({self.tokenizer.mask_token}) found on the input""",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."""
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
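# Illustrative usage (a sketch, not part of the original module; the checkpoint
# below is one public example, not prescribed by this pipeline):
# from transformers import pipeline
# fill_mask = pipeline("fill-mask", model="bert-base-uncased")
# fill_mask("Paris is the [MASK] of France.", top_k=2)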
| 207
| 1
|
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns it contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
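# Illustrative usage (a sketch; the agent name "MyAgent" is made up): a string
# containing whitespace is treated as the prompt itself and returned unchanged,
# while a repo ID triggers a download of the matching template from the Hub.
# prompt = download_prompt("Answer the question: <<task>>", agent_name="MyAgent")
# template = download_prompt(None, agent_name="MyAgent", mode="chat")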
| 359
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
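# Illustrative usage (not part of the original module): the postfix expression
# "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9.
if __name__ == "__main__":
    assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9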
| 35
| 0
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(a__ , """Please use tf.data to implement this functionality.""" )
def lowerCamelCase__ ( a__ : Any ) -> Any:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=a__ ) as bytestream:
UpperCamelCase_ = _readaa(a__ )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
UpperCamelCase_ = _readaa(a__ )
UpperCamelCase_ = _readaa(a__ )
UpperCamelCase_ = _readaa(a__ )
UpperCamelCase_ = bytestream.read(rows * cols * num_images )
UpperCamelCase_ = numpy.frombuffer(a__ , dtype=numpy.uinta )
UpperCamelCase_ = data.reshape(a__ , a__ , a__ , 1 )
return data
@deprecated(a__ , """Please use tf.one_hot on tensors.""" )
def lowerCamelCase__ ( a__ : Any , a__ : str ) -> Any:
UpperCamelCase_ = labels_dense.shape[0]
UpperCamelCase_ = numpy.arange(a__ ) * num_classes
UpperCamelCase_ = numpy.zeros((num_labels, num_classes) )
UpperCamelCase_ = 1
return labels_one_hot
@deprecated(a__ , """Please use tf.data to implement this functionality.""" )
def lowerCamelCase__ ( a__ : str , a__ : Union[str, Any]=False , a__ : Optional[int]=10 ) -> List[str]:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=a__ ) as bytestream:
UpperCamelCase_ = _readaa(a__ )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
UpperCamelCase_ = _readaa(a__ )
UpperCamelCase_ = bytestream.read(a__ )
UpperCamelCase_ = numpy.frombuffer(a__ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(a__ , a__ )
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(a__ , """Please write your own downloading logic.""" )
def lowerCamelCase__ ( a__ : Optional[Any] , a__ : List[str] , a__ : str ) -> Union[str, Any]:
if not gfile.Exists(a__ ):
gfile.MakeDirs(a__ )
UpperCamelCase_ = os.path.join(a__ , a__ )
if not gfile.Exists(a__ ):
urllib.request.urlretrieve(a__ , a__ ) # noqa: S310
with gfile.GFile(a__ ) as f:
UpperCamelCase_ = f.size()
print("""Successfully downloaded""" , a__ , a__ , """bytes.""" )
return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
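# Illustrative usage (a sketch; the directory path is made up and this API is
# deprecated upstream in favor of tensorflow_datasets):
# mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
# images, labels = mnist.train.next_batch(batch_size=100)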
| 122
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowercase_ :
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
return None
class lowercase_ :
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
return None
class lowercase_ ( unittest.TestCase ):
A__ : Union[str, Any] = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , """tf""" , 1_2 , **__UpperCamelCase )
@require_torch
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , """pt""" , 1_2 , **__UpperCamelCase )
@require_torch
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
from transformers import BertModel
UpperCamelCase_ = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(__UpperCamelCase ) )
vocab_file.flush()
UpperCamelCase_ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
UpperCamelCase_ = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase , """pt""" , 1_2 , __UpperCamelCase )
@require_tf
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_ = self._test_export(__UpperCamelCase , """tf""" , 1_2 , **__UpperCamelCase )
UpperCamelCase_ = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
UpperCamelCase_ = self._test_export(__UpperCamelCase , """pt""" , 1_2 , **__UpperCamelCase )
UpperCamelCase_ = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ):
"""simple docstring"""
try:
# Compute path
with TemporaryDirectory() as tempdir:
UpperCamelCase_ = Path(__UpperCamelCase ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
return path
except Exception as e:
self.fail(__UpperCamelCase )
@require_torch
@require_tokenizers
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
from transformers import BertModel
UpperCamelCase_ = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
UpperCamelCase_ = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , """pt""" )
@require_tf
@require_tokenizers
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
from transformers import TFBertModel
UpperCamelCase_ = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
UpperCamelCase_ = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , """tf""" )
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = FeatureExtractionPipeline(__UpperCamelCase , __UpperCamelCase )
UpperCamelCase_ = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = infer_shapes(__UpperCamelCase , __UpperCamelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , __UpperCamelCase )
self.assertSequenceEqual(variable_names[3:] , __UpperCamelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = ["""input_ids""", """attention_mask""", """token_type_ids"""]
UpperCamelCase_ = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
UpperCamelCase_ , UpperCamelCase_ = ensure_valid_input(FuncContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) , set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase_ , UpperCamelCase_ = ensure_valid_input(FuncNonContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) , 1 )
self.assertEqual(len(__UpperCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def test_generate_identified_name(self):
    generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
    self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
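# For reference, a minimal sketch of the contract `generate_identified_filename`
# is tested against above: insert an identifier between the file stem and its
# suffix. This is an illustration of the expected behaviour, not necessarily the
# library's own implementation.
def _identified_filename_sketch(filename: Path, identifier: str) -> Path:
    return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)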
| 122
| 1
|
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
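# Round-trip illustration of the a1z26-style mapping above ('a' -> 1 ... 'z' -> 26):
#   >>> encode("hello")
#   [8, 5, 12, 12, 15]
#   >>> decode([8, 5, 12, 12, 15])
#   'hello'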
| 365
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Marian model."""

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
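# A quick illustration of the attribute_map aliasing defined above:
#   config = MarianConfig()
#   config.hidden_size          # resolves to config.d_model (1024)
#   config.num_attention_heads  # resolves to config.encoder_attention_heads (16)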
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : Union[str, Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__UpperCamelCase : str = {0: "batch"}
__UpperCamelCase : Optional[int] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__UpperCamelCase : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
__UpperCamelCase : List[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__UpperCamelCase : str = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__UpperCamelCase , __UpperCamelCase : Any = self.num_layers
for i in range(__UpperCamelCase ):
__UpperCamelCase : Any = {0: "batch", 2: "past_sequence + sequence"}
__UpperCamelCase : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
__UpperCamelCase : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : List[Any] = super().outputs
else:
__UpperCamelCase : Optional[Any] = super(__UpperCamelCase , self ).outputs
if self.use_past:
__UpperCamelCase , __UpperCamelCase : int = self.num_layers
for i in range(__UpperCamelCase ):
__UpperCamelCase : List[str] = {0: "batch", 2: "past_sequence + sequence"}
__UpperCamelCase : str = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase : str = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Generate decoder inputs
__UpperCamelCase : Any = seq_length if not self.use_past else 1
__UpperCamelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
__UpperCamelCase : Any = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__UpperCamelCase : List[Any] = dict(**__UpperCamelCase , **__UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__UpperCamelCase , __UpperCamelCase : Dict = common_inputs["input_ids"].shape
__UpperCamelCase : Dict = common_inputs["decoder_input_ids"].shape[1]
__UpperCamelCase , __UpperCamelCase : Any = self.num_attention_heads
__UpperCamelCase : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase : List[str] = decoder_seq_length + 3
__UpperCamelCase : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCamelCase : List[str] = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase )] , dim=1 )
__UpperCamelCase : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCamelCase , __UpperCamelCase : List[str] = self.num_layers
__UpperCamelCase : Optional[int] = min(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase : Optional[int] = max(__UpperCamelCase , __UpperCamelCase ) - min_num_layers
__UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__UpperCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
torch.zeros(__UpperCamelCase ),
) )
# TODO: test this.
__UpperCamelCase : Any = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__UpperCamelCase , __UpperCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase : int = self._generate_dummy_inputs_for_encoder_and_decoder(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__UpperCamelCase , __UpperCamelCase : str = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__UpperCamelCase : int = seqlen + 2
__UpperCamelCase , __UpperCamelCase : str = self.num_layers
__UpperCamelCase , __UpperCamelCase : List[str] = self.num_attention_heads
__UpperCamelCase : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCamelCase : Any = common_inputs["attention_mask"].dtype
__UpperCamelCase : Optional[Any] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__UpperCamelCase , __UpperCamelCase , dtype=__UpperCamelCase )] , dim=1 )
__UpperCamelCase : int = [
(torch.zeros(__UpperCamelCase ), torch.zeros(__UpperCamelCase )) for _ in range(__UpperCamelCase )
]
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase : Any = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCamelCase : List[Any] = tokenizer.num_special_tokens_to_add(__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = compute_effective_axis_dimension(
__UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCamelCase )
# Generate dummy inputs according to compute batch and sequence
__UpperCamelCase : Tuple = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__UpperCamelCase : Tuple = dict(tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = -1 , __UpperCamelCase = -1 , __UpperCamelCase = False , __UpperCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
else:
__UpperCamelCase : int = self._generate_dummy_inputs_for_causal_lm(
__UpperCamelCase , batch_size=__UpperCamelCase , seq_length=__UpperCamelCase , is_pair=__UpperCamelCase , framework=__UpperCamelCase )
return common_inputs
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__UpperCamelCase : List[Any] = super()._flatten_past_key_values_(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
__UpperCamelCase : str = super(__UpperCamelCase , self )._flatten_past_key_values_(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@property
def __lowerCamelCase ( self ) -> float:
'''simple docstring'''
return 1E-4
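# A hedged sketch of driving an ONNX export with this config via `transformers.onnx`
# (treat the exact call signature and opset handling as illustrative):
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, MarianMTModel
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))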
| 171
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
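# For instance, with a 3-word window:
#   >>> split_text("the quick brown fox jumps", n=3)
#   ['the quick brown', 'fox jumps']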
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
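# A sketch of reloading and querying what was just saved (using the paths above;
# `question_embedding` would come from a DPR question encoder):
#
#   from datasets import load_from_disk
#   dataset = load_from_disk(passages_path)
#   dataset.load_faiss_index("embeddings", index_path)
#   scores, passages = dataset.get_nearest_examples("embeddings", question_embedding, k=5)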
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 2
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ):
'''simple docstring'''
lowercase__ = '''this is a test'''
lowercase__ = '''this is a test'''
return input_text, output_text
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''<pad>'''
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(UpperCamelCase ) , 30001 )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = ''' \tHeLLo!how \n Are yoU? '''
lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = '''This is a test'''
lowercase__ = [13, 1, 4398, 25, 21, 1289]
lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase )
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
# fmt: off
lowercase__ = '''I was born in 92000, and this is falsé.'''
lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.tokenize(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = DebertaVaTokenizer(UpperCamelCase )
lowercase__ = tokenizer.encode('''sequence builders''' )
lowercase__ = tokenizer.encode('''multi-sequence build''' )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , )
@slow
def UpperCamelCase__ (self : int ):
'''simple docstring'''
lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2
| 1
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__a: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
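# For example, at sample_rate=16000 with max_length=20.0 (the script's default
# below), any clip longer than 320000 samples is cropped to a random
# 320000-sample window, while shorter clips pass through unchanged.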
@dataclass
class DataTrainingArguments:
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(default=a__ , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "A file containing the training audio paths and labels."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "A file containing the validation audio paths and labels."} )
SCREAMING_SNAKE_CASE = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE = field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
SCREAMING_SNAKE_CASE = field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
SCREAMING_SNAKE_CASE = field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=2_0 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class ModelArguments:
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Name or path of preprocessor config."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __post_init__(self):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''', FutureWarning, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , UpperCAmelCase , UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ : Union[str, Any] = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase )
transformers.utils.logging.set_verbosity(UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase__ : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
lowercase__ : Optional[Any] = DatasetDict()
lowercase__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase__ : Dict = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase__ : Optional[int] = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase__ : Tuple = feature_extractor.model_input_names[0]
def train_transforms(UpperCAmelCase ):
lowercase__ : Tuple = []
for audio in batch[data_args.audio_column_name]:
lowercase__ : Optional[Any] = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(UpperCAmelCase )
lowercase__ : Optional[int] = feature_extractor(UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate )
lowercase__ : Optional[Any] = {model_input_name: inputs.get(UpperCAmelCase )}
lowercase__ : str = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(UpperCAmelCase ):
lowercase__ : Optional[Any] = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
lowercase__ : str = feature_extractor(UpperCAmelCase , sampling_rate=feature_extractor.sampling_rate )
lowercase__ : Optional[Any] = {model_input_name: inputs.get(UpperCAmelCase )}
lowercase__ : Optional[Any] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__ : int = raw_datasets['''train'''].features[data_args.label_column_name].names
lowercase__ , lowercase__ : Tuple = {}, {}
for i, label in enumerate(UpperCAmelCase ):
lowercase__ : Optional[int] = str(UpperCAmelCase )
lowercase__ : str = label
# Load the accuracy metric from the datasets package
lowercase__ : str = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(UpperCAmelCase ):
lowercase__ : str = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=UpperCAmelCase , references=eval_pred.label_ids )
lowercase__ : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(UpperCAmelCase ) , labelaid=UpperCAmelCase , idalabel=UpperCAmelCase , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__ : Tuple = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__ : int = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(UpperCAmelCase , output_all_columns=UpperCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__ : List[str] = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(UpperCAmelCase , output_all_columns=UpperCAmelCase )
# Initialize our trainer
lowercase__ : Any = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=UpperCAmelCase , tokenizer=UpperCAmelCase , )
# Training
if training_args.do_train:
lowercase__ : Any = None
if training_args.resume_from_checkpoint is not None:
lowercase__ : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ : int = last_checkpoint
lowercase__ : Any = trainer.train(resume_from_checkpoint=UpperCAmelCase )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ : List[str] = trainer.evaluate()
trainer.log_metrics('''eval''' , UpperCAmelCase )
trainer.save_metrics('''eval''' , UpperCAmelCase )
# Write model card and (optionally) push to hub
lowercase__ : Optional[int] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase )
else:
trainer.create_model_card(**UpperCAmelCase )
if __name__ == "__main__":
main()
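# Example invocation (dataset, model, and output paths are illustrative):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval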
| 214
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
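# Quick sanity check: the second function is the sigmoid linear unit
# (SiLU / swish with beta = 1), i.e. x * sigmoid(x).
#   >>> sigmoid(np.array([0.0]))
#   array([0.5])
#   >>> swish(np.array([0.0]))
#   array([0.])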
if __name__ == "__main__":
import doctest
doctest.testmod()
| 214
| 1
|
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None) -> None:
    require_version(deps[pkg], hint)
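# Usage sketch: modules can validate an optional dependency on demand, e.g.
#
#   dep_version_check("tokenizers")  # raises if the installed version breaks the pin in deps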
| 22
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
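# Note on the fixture: the first two rows ("a " * 20 vs "a " * 30) share nearly
# all of their token shingles, so MinHash-based clustering should group them at
# the 0.85 Jaccard threshold used below, while the "b " * 7 row stands alone.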
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)  # the kept representative is flagged as extreme
| 348
| 0
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
env_name = "hopper-medium-v2"
env = gym.make(env_name)
pipeline = ValueGuidedRLPipeline.from_pretrained(
'''bglick13/hopper-medium-v2-value-function-hor32''',
env=env,
)
env.seed(0)
obs = env.reset()
total_reward = 0
total_score = 0
T = 1000
rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
next_observation, reward, terminal, _ = env.step(denorm_actions)
score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
f' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
obs = next_observation
except KeyboardInterrupt:
pass
print(f'Total reward: {total_reward}')
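# `rollout` now holds the initial observation plus one observation per executed
# step; a minimal way to persist it for later rendering (illustrative):
#
#   import numpy as np
#   np.save("hopper_rollout.npy", np.asarray(rollout))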
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
_import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
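# With the lazy module installed in sys.modules, heavy backends are only
# imported on first attribute access, e.g.:
#
#   import transformers.models.clip as clip
#   clip.CLIPTextConfig  # cheap, config-only import
#   clip.CLIPModel       # pulls in modeling_clip (and torch) on demand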
| 155
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """Arguments extending `TrainingArguments` with generation-specific options."""

    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
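# A minimal usage sketch (values illustrative):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   args.to_dict()  # any GenerationConfig values are serialized to plain dicts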
| 74
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after
    `start_prompt` and stops at `end_prompt` (but returns what's before `start_prompt` as a first block, so `code`
    is always the same as joining the result of this function).
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lower-case it and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
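
# For example, sort_objects(["load_tf_weights", "BertModel", "BERT_CONSTANT"])
# yields ["BERT_CONSTANT", "BertModel", "load_tf_weights"]: constants first,
# then classes, then functions, each group sorted case/underscore-insensitively.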
def sort_objects_in_import(import_statement: str) -> str:
    """Return the same `import_statement` but with its objects properly sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of a given init; with `check_only=True`, only report whether it needs sorting."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
_snake_case = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
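# Typical invocations, assuming the script is saved as `utils/custom_init_isort.py`
# at the repository root (which is where `transformers` keeps it):
#   python utils/custom_init_isort.py --check_only   # fail if an init needs sorting
#   python utils/custom_init_isort.py                # rewrite the inits in place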
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")

    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
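
# Worked example (ideal gas law, PV = nRT): 2 mol of an ideal gas held at 100 K
# in a 5 m^3 vessel exerts P = 2 * 100 * 8.314462 / 5 ≈ 332.58 Pa; the volume
# helper is the same formula solved for V instead of P.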
if __name__ == "__main__":
from doctest import testmod
testmod()
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetrImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
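
# The helper above mirrors DetrImageProcessor's resizing rule: scale the image
# so its shorter edge matches size["shortest_edge"] while preserving the aspect
# ratio, which lets the tests below predict output shapes for arbitrary inputs.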
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure["modeling_data2vec_text"] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure["modeling_data2vec_vision"] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the front of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, looked up by value
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """
    Output class for ShapEImg2ImgPipeline.

    Args:
        images (`torch.FloatTensor`)
            A list of images for 3D rendering.
    """

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
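# 1000003 is prime, which keeps collisions of the polynomial rolling hash rare;
# both constants feed the hash updates inside rabin_karp.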
def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp Algorithm for finding a pattern within a piece of text with complexity O(nm), most efficient
    when it is used with multiple patterns as it is able to check if any of a set of patterns match a section
    of text in o(1) given the precomputed hashes.

    This will be the simple version which only assumes one pattern is being searched for but it's not hard to
    modify:

    1) Calculate pattern hash

    2) Step through the text one character at a time, passing a window with the same length as the pattern,
       calculating the hash of the text within the window and comparing it with the hash of the pattern. Only
       test character equality if the hashes match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task, following SimMIM.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
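
# With the defaults (input_size=192, mask_patch_size=32, model_patch_size=4,
# mask_ratio=0.6), a call draws 22 of the 36 entries of a 6x6 grid, upsamples
# each entry to an 8x8 block, and returns a flat tensor of 48 * 48 = 2304 flags.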
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of `nums1` and `nums2`."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
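
# For example, median_of_two_arrays([1, 2], [3]) == 2 (odd combined length) and
# median_of_two_arrays([0, -1.1], [2.5, 1]) == 0.5 (mean of the two middle values).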
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
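# A brief note (hedged): as a TokenizerTesterMixin subclass, this class also inherits the
# shared tokenizer test suite. It can be run with pytest, e.g.
#   python -m pytest -k PhobertTokenizationTest
# (the invocation and the test file's location are illustrative, not taken from this file).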
| 71
|
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model on the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Iterator[str], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model on the given iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model's unk_id is only settable through serialization, so round-trip via JSON
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
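# A minimal usage sketch (hedged; the file path is hypothetical and must exist):
# tokenizer = SentencePieceUnigramTokenizer()
# tokenizer.train(files="corpus.txt", vocab_size=8000)
# tokenizer.save("unigram_tokenizer.json")  # save() is provided by BaseTokenizer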
| 190
| 0
|
def count_inversions_bf(arr):
    """Count inversions by brute force: check every pair (i, j) with i < j."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort-style divide and conquer in O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    num_inversions = inversions_p + inversions_q + cross_inversions
    return merged, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted arrays and count the inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 216
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2,
        scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
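# A brief usage sketch: via attribute_map (a PretrainedConfig feature), generic names
# resolve to the decoder-specific attributes above:
# config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)
# assert config.hidden_size == 256 and config.num_attention_heads == 4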
| 216
| 1
|
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-indexed position of the highest set bit (the bit length).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
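# A brief note: for non-negative integers this matches Python's built-in bit_length, e.g.
# assert get_highest_set_bit_position(25) == (25).bit_length()  # both are 5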
| 30
|
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    """
    Sum of an arithmetic progression: n/2 * (2a + (n - 1)d).

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 35
| 0
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
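# A brief usage sketch (hedged): the dynamic-axes mapping above is what the ONNX
# exporter consumes, e.g.
# onnx_config = CamembertOnnxConfig(CamembertConfig(), task="default")
# print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes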
| 196
|
from __future__ import annotations

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes using BFS."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
| 196
| 1
|
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor in [-1, 1]."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor in [-1, 1]; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0" to [-bit_scale, bit_scale] instead of [-1, 1]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0" to [-bit_scale, bit_scale]
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # swap in the bit-aware step function matching the scheduler type
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
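# A minimal usage sketch (hedged; assumes a UNet trained on bit-encoded images is available):
# unet = UNet2DConditionModel(...)  # hypothetical architecture/weights
# pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler())
# image = pipe(height=256, width=256, num_inference_steps=50).images[0]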
| 322
|
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted array of integers, return the indices of the two numbers
    that add up to `target`, or an empty list if no such pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 322
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_a : Union[str, Any] = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
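# Design note (hedged): because sys.modules[__name__] is swapped for a _LazyModule,
# attributes of this module resolve lazily on first access, e.g.
# from transformers.models.gpt_bigcode import GPTBigCodeConfig  # no torch import yet
# model classes only trigger the heavy torch-dependent import when actually accessed.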
| 361
|
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_a : List[Any] = logging.get_logger(__name__)
class __A :
_UpperCamelCase : str = None
@experimental
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Tuple ,_lowerCamelCase : Optional[int] ) -> List[Any]:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
return _map_with_joblib(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ,_lowerCamelCase : Tuple ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Tuple ) -> Union[str, Any]:
_lowerCAmelCase : int = num_proc if num_proc <= len(_lowerCamelCase ) else len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = [] # We organize the splits ourselve (contiguous splits)
for index in range(_lowerCamelCase ):
_lowerCAmelCase : List[str] = len(_lowerCamelCase ) // num_proc
_lowerCAmelCase : str = len(_lowerCamelCase ) % num_proc
_lowerCAmelCase : Tuple = div * index + min(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : int = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(_lowerCamelCase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f"Error dividing inputs iterable among processes. "
f"Total number of objects {len(_lowerCamelCase )}, "
f"length: {sum(len(i[1] ) for i in split_kwds )}" )
logger.info(
f"Spawning {num_proc} processes for {len(_lowerCamelCase )} objects in slices of {[len(i[1] ) for i in split_kwds]}" )
_lowerCAmelCase , _lowerCAmelCase : List[str] = None, None
if not disable_tqdm:
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = (RLock(),), tqdm.set_lock
with Pool(_lowerCamelCase ,initargs=_lowerCamelCase ,initializer=_lowerCamelCase ) as pool:
_lowerCAmelCase : str = pool.map(_lowerCamelCase ,_lowerCamelCase )
logger.info(f"Finished {num_proc} processes" )
_lowerCAmelCase : int = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"Unpacked {len(_lowerCamelCase )} objects" )
return mapped
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : int ,_lowerCamelCase : Dict ,_lowerCamelCase : Tuple ,_lowerCamelCase : Any ,_lowerCamelCase : str ) -> Optional[Any]:
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=_lowerCamelCase ):
return joblib.Parallel()(
joblib.delayed(_lowerCamelCase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Any:
_lowerCAmelCase : List[Any] = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
_lowerCAmelCase : Optional[Any] = None
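# A minimal usage sketch (hedged; assumes joblibspark is installed and a Spark session is running):
# from datasets import load_dataset
# with parallel_backend("spark"):
#     ds = load_dataset("json", data_files="data.json")  # "data.json" is a hypothetical file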
| 126
| 0
|