'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first character of a sentence if it is a lowercase letter.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class UpperCamelCase__ ( BaseOutput ):
"""simple docstring"""
    sample: torch.FloatTensor
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Any=("DownEncoderBlock2D",) , lowerCamelCase_ : List[Any]=(64,) , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : List[Any]="silu" , lowerCamelCase_ : Optional[int]=True , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[int] = layers_per_block
        SCREAMING_SNAKE_CASE : int = torch.nn.Conv2d(
lowerCamelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : Optional[int] = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : Optional[Any] = get_down_block(
lowerCamelCase_ , num_layers=self.layers_per_block , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
self.down_blocks.append(lowerCamelCase_ )
# mid
        SCREAMING_SNAKE_CASE : Union[str, Any] = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# out
SCREAMING_SNAKE_CASE : List[Any] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : Dict = 2 * out_channels if double_z else out_channels
        SCREAMING_SNAKE_CASE : List[Any] = nn.Conv2d(block_out_channels[-1] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Tuple = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = x
SCREAMING_SNAKE_CASE : int = self.conv_in(lowerCamelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[Any] ):
def custom_forward(*lowerCamelCase_ : List[str] ):
return module(*lowerCamelCase_ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCamelCase_ )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : Tuple = down_block(lowerCamelCase_ )
# middle
SCREAMING_SNAKE_CASE : List[Any] = self.mid_block(lowerCamelCase_ )
# post-process
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_norm_out(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : str=("UpDecoderBlock2D",) , lowerCamelCase_ : Union[str, Any]=(64,) , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : Dict="silu" , lowerCamelCase_ : Any="group" , ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : int = layers_per_block
        SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv2d(
lowerCamelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Any = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : str = in_channels if norm_type == """spatial""" else None
# mid
        SCREAMING_SNAKE_CASE : Dict = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCamelCase_ , temb_channels=lowerCamelCase_ , )
# up
SCREAMING_SNAKE_CASE : Union[str, Any] = list(reversed(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE : str = output_channel
SCREAMING_SNAKE_CASE : Union[str, Any] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : List[str] = i == len(lowerCamelCase_ ) - 1
SCREAMING_SNAKE_CASE : List[Any] = get_up_block(
lowerCamelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , prev_output_channel=lowerCamelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCamelCase_ , resnet_groups=lowerCamelCase_ , attention_head_dim=lowerCamelCase_ , temb_channels=lowerCamelCase_ , resnet_time_scale_shift=lowerCamelCase_ , )
self.up_blocks.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : List[Any] = SpatialNorm(block_out_channels[0] , lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCamelCase_ , eps=1e-6 )
SCREAMING_SNAKE_CASE : Dict = nn.SiLU()
        SCREAMING_SNAKE_CASE : str = nn.Conv2d(block_out_channels[0] , lowerCamelCase_ , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Dict = False
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = z
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCamelCase_ : List[str] ):
def custom_forward(*lowerCamelCase_ : str ):
return module(*lowerCamelCase_ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
SCREAMING_SNAKE_CASE : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ , use_reentrant=lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCamelCase_ ) , lowerCamelCase_ , lowerCamelCase_ )
else:
# middle
SCREAMING_SNAKE_CASE : Any = self.mid_block(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = sample.to(lowerCamelCase_ )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Any = up_block(lowerCamelCase_ , lowerCamelCase_ )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.conv_norm_out(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_act(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.conv_out(lowerCamelCase_ )
return sample
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None , lowerCamelCase_ : Any="random" , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[Any]=True ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = n_e
SCREAMING_SNAKE_CASE : int = vq_embed_dim
SCREAMING_SNAKE_CASE : Tuple = beta
SCREAMING_SNAKE_CASE : Union[str, Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Optional[Any] = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Tuple = self.used.shape[0]
SCREAMING_SNAKE_CASE : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : Union[str, Any] = self.re_embed
SCREAMING_SNAKE_CASE : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
SCREAMING_SNAKE_CASE : Optional[int] = n_e
SCREAMING_SNAKE_CASE : Any = sane_index_shape
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : Tuple = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Union[str, Any] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Tuple = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Any = self.unknown_index
return new.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = inds.shape
assert len(lowerCamelCase_ ) > 1
SCREAMING_SNAKE_CASE : str = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Tuple = self.used.to(lowerCamelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : List[Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCamelCase_ )
return back.reshape(lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : int = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Any = torch.argmin(torch.cdist(lowerCamelCase_ , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : Tuple = self.embedding(lowerCamelCase_ ).view(z.shape )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : List[str] = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients through the non-differentiable quantization step (straight-through estimator)
SCREAMING_SNAKE_CASE : Tuple = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : int = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.unmap_to_all(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : str = self.embedding(lowerCamelCase_ )
if shape is not None:
SCREAMING_SNAKE_CASE : List[str] = z_q.view(lowerCamelCase_ )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCamelCase__ ( object ):
"""simple docstring"""
    def __init__( self , parameters , deterministic=False ):
        '''simple docstring'''
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[torch.Generator] = None ):
'''simple docstring'''
        sample = randn_tensor(
            self.mean.shape , generator=lowerCamelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def lowerCamelCase_ ( self , other=None ):
'''simple docstring'''
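        # Closed-form KL divergence of this diagonal Gaussian: against the
        # standard normal N(0, I) when `other` is None, otherwise against `other`.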
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
    def lowerCamelCase_ ( self , sample , dims=[1, 2, 3] ):
'''simple docstring'''
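        # Negative log-likelihood of `sample` under this diagonal Gaussian,
        # summed over `dims` (by default all non-batch axes).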
if self.deterministic:
return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
return self.mean
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata( class_info_file , repo_path="shi-labs/oneformer_demo" ) -> Tuple:
    '''simple docstring'''
    with open(hf_hub_download(repo_path , class_info_file , repo_type="dataset" ) , "r" ) as f:
        class_info = json.load(f )
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"] )
        if info["isthing"]:
            thing_ids.append(int(key ) )
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase ):
def __init__( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=7 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : int=30 , UpperCAmelCase : str=400 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , UpperCAmelCase : List[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase : Any=10 , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : Union[str, Any]=255 , UpperCAmelCase : str="shi-labs/oneformer_demo" , UpperCAmelCase : Dict="ade20k_panoptic.json" , UpperCAmelCase : List[Any]=10 , ):
__lowerCamelCase : str = parent
__lowerCamelCase : List[str] = batch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : List[str] = min_resolution
__lowerCamelCase : str = max_resolution
__lowerCamelCase : List[Any] = do_resize
__lowerCamelCase : Optional[int] = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
__lowerCamelCase : List[str] = do_normalize
__lowerCamelCase : List[Any] = image_mean
__lowerCamelCase : List[str] = image_std
__lowerCamelCase : List[str] = class_info_file
__lowerCamelCase : str = prepare_metadata(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : str = num_text
__lowerCamelCase : Dict = repo_path
# for the post_process_functions
__lowerCamelCase : Tuple = 2
__lowerCamelCase : Optional[int] = 10
__lowerCamelCase : str = 10
__lowerCamelCase : Any = 3
__lowerCamelCase : Union[str, Any] = 4
__lowerCamelCase : List[str] = num_labels
__lowerCamelCase : Tuple = do_reduce_labels
__lowerCamelCase : List[Any] = ignore_index
def lowerCamelCase__ ( self : int ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple=False ):
if not batched:
__lowerCamelCase : Union[str, Any] = image_inputs[0]
if isinstance(UpperCAmelCase , Image.Image ):
__lowerCamelCase : int = image.size
else:
__lowerCamelCase : Optional[Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCamelCase : Tuple = int(self.size["shortest_edge"] * h / w )
__lowerCamelCase : Optional[Any] = self.size["shortest_edge"]
elif w > h:
__lowerCamelCase : int = self.size["shortest_edge"]
__lowerCamelCase : Optional[int] = int(self.size["shortest_edge"] * w / h )
else:
__lowerCamelCase : Union[str, Any] = self.size["shortest_edge"]
__lowerCamelCase : str = self.size["shortest_edge"]
else:
__lowerCamelCase : List[str] = []
for image in image_inputs:
__lowerCamelCase : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCamelCase : Optional[Any] = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[0] )[0]
__lowerCamelCase : Optional[Any] = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
def lowerCamelCase__ ( self : Optional[int] ):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class _snake_case ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
snake_case__ = image_processing_class
def lowerCamelCase__ ( self : List[Any] ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
@property
def lowerCamelCase__ ( self : int ):
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "ignore_index" ) )
self.assertTrue(hasattr(UpperCAmelCase , "class_info_file" ) )
self.assertTrue(hasattr(UpperCAmelCase , "num_text" ) )
self.assertTrue(hasattr(UpperCAmelCase , "repo_path" ) )
self.assertTrue(hasattr(UpperCAmelCase , "metadata" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_reduce_labels" ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : List[Any] ):
# Initialize image_processor
__lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCamelCase : Dict = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__lowerCamelCase : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : int = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCamelCase : Optional[int] = image_processor(
UpperCAmelCase , ["semantic"] * len(UpperCAmelCase ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : str ):
# Initialize image_processor
__lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCamelCase : Optional[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__lowerCamelCase : Optional[Any] = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : Optional[int] = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCamelCase : Tuple = image_processor(
UpperCAmelCase , ["semantic"] * len(UpperCAmelCase ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processor
__lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCamelCase : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
__lowerCamelCase : str = self.image_processing_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : str = self.image_processing_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
__lowerCamelCase : int = image_processor(
UpperCAmelCase , ["semantic"] * len(UpperCAmelCase ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Dict=False , UpperCAmelCase : Dict="np" ):
__lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
__lowerCamelCase : Tuple = self.image_processing_tester.num_labels
__lowerCamelCase : str = None
__lowerCamelCase : str = None
__lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCAmelCase )
if with_segmentation_maps:
__lowerCamelCase : Tuple = num_labels
if is_instance_map:
__lowerCamelCase : int = list(range(UpperCAmelCase ) ) * 2
__lowerCamelCase : List[str] = dict(enumerate(UpperCAmelCase ) )
__lowerCamelCase : List[str] = [
            np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
__lowerCamelCase : Any = [Image.fromarray(UpperCAmelCase ) for annotation in annotations]
__lowerCamelCase : Any = image_processor(
UpperCAmelCase , ["semantic"] * len(UpperCAmelCase ) , UpperCAmelCase , return_tensors="pt" , instance_id_to_semantic_id=UpperCAmelCase , pad_and_return_pixel_mask=UpperCAmelCase , )
return inputs
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : int ):
def common(UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[int]=None ):
__lowerCamelCase : List[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCAmelCase , is_instance_map=UpperCAmelCase , segmentation_type=UpperCAmelCase )
__lowerCamelCase : Dict = inputs["mask_labels"]
__lowerCamelCase : Tuple = inputs["class_labels"]
__lowerCamelCase : List[str] = inputs["pixel_values"]
__lowerCamelCase : Optional[Any] = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCAmelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=UpperCAmelCase )
common(is_instance_map=UpperCAmelCase , segmentation_type="pil" )
common(is_instance_map=UpperCAmelCase , segmentation_type="pil" )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : List[Any] = np.zeros((20, 50) )
__lowerCamelCase : Any = 1
__lowerCamelCase : Any = 1
__lowerCamelCase : List[Any] = 1
__lowerCamelCase : int = binary_mask_to_rle(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCamelCase__ ( self : Union[str, Any] ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCamelCase__ ( self : Optional[Any] ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCamelCase__ ( self : Tuple ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__A = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _snake_case ( PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
def __init__( self : Any , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict=900 , UpperCAmelCase : str=2048 , UpperCAmelCase : Optional[Any]=6 , UpperCAmelCase : Optional[Any]=2048 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Union[str, Any]=6 , UpperCAmelCase : int=1024 , UpperCAmelCase : str=8 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[Any]="relu" , UpperCAmelCase : List[str]=256 , UpperCAmelCase : int=0.1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : List[Any]=0.0_2 , UpperCAmelCase : Optional[Any]=1.0 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Dict=False , UpperCAmelCase : Optional[int]="sine" , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Any=4 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=300 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : str=5 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : str=1 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Tuple=0.2_5 , **UpperCAmelCase : List[str] , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__lowerCamelCase : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : List[Any] = backbone_config.pop("model_type" )
__lowerCamelCase : Any = CONFIG_MAPPING[backbone_model_type]
__lowerCamelCase : List[Any] = config_class.from_dict(UpperCAmelCase )
__lowerCamelCase : List[str] = backbone_config
__lowerCamelCase : Any = num_queries
__lowerCamelCase : Tuple = max_position_embeddings
__lowerCamelCase : Dict = d_model
__lowerCamelCase : List[str] = encoder_ffn_dim
__lowerCamelCase : int = encoder_layers
__lowerCamelCase : str = encoder_attention_heads
__lowerCamelCase : Dict = decoder_ffn_dim
__lowerCamelCase : Tuple = decoder_layers
__lowerCamelCase : str = decoder_attention_heads
__lowerCamelCase : Dict = dropout
__lowerCamelCase : List[Any] = attention_dropout
__lowerCamelCase : int = activation_dropout
__lowerCamelCase : int = activation_function
__lowerCamelCase : Any = init_std
__lowerCamelCase : Optional[Any] = init_xavier_std
__lowerCamelCase : int = encoder_layerdrop
__lowerCamelCase : Dict = auxiliary_loss
__lowerCamelCase : Optional[int] = position_embedding_type
# deformable attributes
__lowerCamelCase : Tuple = num_feature_levels
__lowerCamelCase : str = encoder_n_points
__lowerCamelCase : List[str] = decoder_n_points
__lowerCamelCase : List[str] = two_stage
__lowerCamelCase : Dict = two_stage_num_proposals
__lowerCamelCase : int = with_box_refine
__lowerCamelCase : Union[str, Any] = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__lowerCamelCase : str = class_cost
__lowerCamelCase : Optional[Any] = bbox_cost
__lowerCamelCase : Tuple = giou_cost
# Loss coefficients
__lowerCamelCase : List[Any] = mask_loss_coefficient
__lowerCamelCase : Dict = dice_loss_coefficient
__lowerCamelCase : Any = bbox_loss_coefficient
__lowerCamelCase : Dict = giou_loss_coefficient
__lowerCamelCase : Optional[Any] = eos_coefficient
__lowerCamelCase : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def lowerCamelCase__ ( self : Dict ):
return self.encoder_attention_heads
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.d_model
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Dict = self.backbone_config.to_dict()
__lowerCamelCase : str = self.__class__.model_type
        return output
"""simple docstring"""
class OverFlowError(Exception):
    '''Raised when a queue has reached its maximum size.'''
    pass
class UnderFlowError(Exception):
    '''Raised when dequeuing from an empty queue.'''
    pass
class FixedPriorityQueue :
'''simple docstring'''
def __init__( self ) -> Dict:
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority , data ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('''Maximum queue size is 100''' )
            self.queues[priority].append(data )
except IndexError:
raise ValueError('''Valid priorities are 0, 1, and 2''' )
    def dequeue( self ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('''All queues are empty''' )
def __str__( self ) -> str:
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
'''simple docstring'''
def __init__( self ) -> Optional[int]:
        self.queue = []
    def enqueue( self , data ) -> None:
        if len(self.queue ) == 100:
            raise OverFlowError('''Maximum queue size is 100''' )
        self.queue.append(data )
    def dequeue( self ) -> int:
if not self.queue:
raise UnderFlowError('''The queue is empty''' )
else:
            data = min(self.queue )
            self.queue.remove(data )
return data
def __str__( self ) -> Optional[int]:
return str(self.queue )
def fixed_priority_queue( ):
"""simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0, 10 )
fpq.enqueue(1, 70 )
fpq.enqueue(0, 1_00 )
fpq.enqueue(2, 1 )
fpq.enqueue(2, 5 )
fpq.enqueue(1, 7 )
fpq.enqueue(2, 4 )
fpq.enqueue(1, 64 )
fpq.enqueue(0, 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue( ):
"""simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
    element_priority_queue()
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
from math import factorial
def solution(n: int = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
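# The value returned is the central binomial coefficient C(2n, n); for the
# default n = 20 it counts the lattice paths through a 20x20 grid
# (Project Euler problem 15): C(40, 20) = 137846528820.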
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter( formatter_cls : type , format_type : Optional[str] , aliases : Optional[List[str]] = None , ) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter( unavailable_error : Exception , format_type : Optional[str] , aliases : Optional[List[str]] = None ) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias( format_type : Optional[str] ) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter( format_type : Optional[str] , **format_kwargs ) -> Formatter:
    format_type = get_format_type_from_alias(format_type )
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs )
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
a : int = tuple[int, int]
class Node :
"""simple docstring"""
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic( self ):
'''simple docstring'''
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , snake_case ):
'''simple docstring'''
return self.f_cost < other.f_cost
class AStar :
"""simple docstring"""
    def __init__( self , start , goal ):
        '''simple docstring'''
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
return [self.start.pos]
    def get_successors( self , parent ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar :
"""simple docstring"""
    def __init__( self , start , goal ):
        '''simple docstring'''
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path( self , fwd_node , bwd_node ):
        '''simple docstring'''
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main( ):
'''simple docstring'''
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
logger.info(F"Loading text from {args.file_path}" )
with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
logger.info("Start encoding" )
    logger.info(F"{len(data )} examples to process." )
    rslt = []
    iter = 0
    interval = 1_0000
    start = time.time()
for text in data:
        text = F"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
iter += 1
if iter % interval == 0:
            end = time.time()
logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
logger.info("Finished binarization" )
    logger.info(F"{len(rslt )} examples processed." )
    dp_file = F"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
else:
        rslt_ = [np.int32(d ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"Dump to {dp_file}" )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class SchedulerType(Enum ):
    '''simple docstring'''
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer , last_epoch = -1 ):
    return LambdaLR(optimizer , lambda current_step : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer , num_warmup_steps , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer , step_rules , last_epoch = -1 ):
    rules_dict = {}
    rule_list = step_rules.split(''',''' )
    for rule_str in rule_list[:-1]:
        value_str , steps_str = rule_str.split(''':''' )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch )
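# A minimal usage sketch for `get_scheduler`, assuming an AdamW optimizer and a
# model whose forward pass returns a tensor; the function name below, the warmup
# length and the step count are illustrative assumptions, not part of the module.
from torch.optim import AdamW

def _example_training_loop(model , num_steps = 1000 ):
    optimizer = AdamW(model.parameters() , lr=5e-5 )
    lr_scheduler = get_scheduler(
        "linear" , optimizer=optimizer , num_warmup_steps=100 , num_training_steps=num_steps )
    for _ in range(num_steps ):
        loss = model().mean()  # hypothetical forward pass yielding a scalar loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()  # the learning-rate schedule advances once per optimizer step
        optimizer.zero_grad()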
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC ):
"""simple docstring"""
def __init__( self ) ->Tuple:
# test for the above condition
self.test()
def __lowerCAmelCase ( self ) ->int:
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = False
while not completed:
if counter == 1:
self.reset()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
SCREAMING_SNAKE_CASE : str = self.update(_lowerCamelCase )
counter += 1
if counter > 1_0000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def __lowerCAmelCase ( self ) ->Tuple:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->List[str]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self ) ->List[str]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
@abstractmethod
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Optional[int]:
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class a_ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Dict:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" )
if any((not isinstance(token_id , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" )
SCREAMING_SNAKE_CASE : int = token_ids
SCREAMING_SNAKE_CASE : int = len(self.token_ids )
SCREAMING_SNAKE_CASE : Union[str, Any] = -1 # the index of the currently fulfilled step
SCREAMING_SNAKE_CASE : List[str] = False
def __lowerCAmelCase ( self ) ->Any:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : str = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
SCREAMING_SNAKE_CASE : List[str] = True
if self.fulfilled_idx == (self.seqlen - 1):
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Any = completed
else:
# failed to make progress.
SCREAMING_SNAKE_CASE : List[Any] = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Any = 0
def __lowerCAmelCase ( self ) ->str:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = PhrasalConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : Tuple = self.seqlen
SCREAMING_SNAKE_CASE : int = self.fulfilled_idx
SCREAMING_SNAKE_CASE : int = self.completed
return new_constraint
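# A minimal usage sketch, assuming the class above is transformers'
# PhrasalConstraint (the name its copy() method references). The constraint is
# fulfilled one token at a time and throws away its progress on a mismatch:
#
#   constraint = PhrasalConstraint([5, 9, 1])
#   constraint.does_advance(5)   # True: 5 is the next expected token
#   constraint.update(5)         # -> (stepped=True, completed=False, reset=False)
#   constraint.update(9)         # progress continues
#   constraint.update(7)         # mismatch -> (stepped=False, completed=False, reset=True)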
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=True ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : int = max([len(one ) for one in nested_token_ids] )
SCREAMING_SNAKE_CASE : Dict = {}
for token_ids in nested_token_ids:
SCREAMING_SNAKE_CASE : int = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
SCREAMING_SNAKE_CASE : Any = {}
SCREAMING_SNAKE_CASE : Optional[Any] = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
F""" {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : Tuple = root
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
SCREAMING_SNAKE_CASE : Optional[Any] = self.trie
for current_token in current_seq:
SCREAMING_SNAKE_CASE : Tuple = start[current_token]
SCREAMING_SNAKE_CASE : Tuple = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : List[str] = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(nn ) for nn in next_nodes] )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Optional[int]:
SCREAMING_SNAKE_CASE : Tuple = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
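# Sketch of the trie's behaviour (class name as referenced below by
# DisjunctiveConstraint; token ids are illustrative):
#
#   trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
#   trie.next_tokens([1, 2])      # -> [3, 4]: both phrases are still reachable
#   trie.reached_leaf([1, 2, 3])  # -> True: one full phrase has been consumed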
class a_ ( _lowerCAmelCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Any:
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" )
if any(not isinstance(token_ids , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" )
if any(
any((not isinstance(token_id , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" )
SCREAMING_SNAKE_CASE : Any = DisjunctiveTrie(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = nested_token_ids
SCREAMING_SNAKE_CASE : Optional[int] = self.trie.max_height
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Dict = False
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[int]:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}""" )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : int = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Tuple = True
else:
SCREAMING_SNAKE_CASE : List[str] = True
self.reset()
SCREAMING_SNAKE_CASE : Any = self.trie.reached_leaf(self.current_seq )
SCREAMING_SNAKE_CASE : Tuple = completed
return stepped, completed, reset
def __lowerCAmelCase ( self ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : str = []
def __lowerCAmelCase ( self ) ->Optional[int]:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self , _lowerCamelCase=False ) ->List[Any]:
SCREAMING_SNAKE_CASE : int = DisjunctiveConstraint(self.token_ids )
if stateful:
SCREAMING_SNAKE_CASE : str = self.seqlen
SCREAMING_SNAKE_CASE : Optional[Any] = self.current_seq
SCREAMING_SNAKE_CASE : Tuple = self.completed
return new_constraint
class a_ :
"""simple docstring"""
def __init__( self , _lowerCamelCase ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = constraints
# max # of steps required to fulfill a given constraint
SCREAMING_SNAKE_CASE : Any = max([c.seqlen for c in constraints] )
SCREAMING_SNAKE_CASE : Optional[int] = len(_lowerCamelCase )
SCREAMING_SNAKE_CASE : int = False
self.init_state()
def __lowerCAmelCase ( self ) ->Tuple:
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : int = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def __lowerCAmelCase ( self ) ->List[str]:
SCREAMING_SNAKE_CASE : Optional[int] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : Dict = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
SCREAMING_SNAKE_CASE : Optional[Any] = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE : Optional[int] = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Tuple:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
SCREAMING_SNAKE_CASE : Tuple = self.add(_lowerCamelCase )
# the entire list of constraints is fulfilled
if self.completed:
break
def __lowerCAmelCase ( self , _lowerCamelCase ) ->str:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = False, False
if self.completed:
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Tuple = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
# job, simply update the state
SCREAMING_SNAKE_CASE : Union[str, Any] = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
SCREAMING_SNAKE_CASE : Any = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
SCREAMING_SNAKE_CASE : List[str] = None
if len(self.pending_constraints ) == 0:
# we're done!
SCREAMING_SNAKE_CASE : List[str] = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
SCREAMING_SNAKE_CASE : int = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = None
if not complete and stepped:
SCREAMING_SNAKE_CASE : Dict = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
SCREAMING_SNAKE_CASE : int = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there are no pending constraints left after this and nothing in progress either, then we must be
# complete.
SCREAMING_SNAKE_CASE : List[str] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self , _lowerCamelCase=True ) ->List[str]:
SCREAMING_SNAKE_CASE : Dict = ConstraintListState(self.constraints ) # we never actually mutate the self.constraints objects
# throughout this process, so they are still at their initialization state.
if stateful:
SCREAMING_SNAKE_CASE : Any = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
SCREAMING_SNAKE_CASE : List[Any] = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
SCREAMING_SNAKE_CASE : List[str] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
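# End-to-end sketch: in transformers these objects back `model.generate`'s
# constrained beam search, which drives a ConstraintListState per beam. The
# checkpoint and prompt below are illustrative assumptions:
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint
#   tokenizer = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   force_ids = tokenizer("Regen", add_special_tokens=False).input_ids
#   out = model.generate(
#       **tokenizer("translate English to German: heavy rain", return_tensors="pt"),
#       constraints=[PhrasalConstraint(force_ids)],
#       num_beams=4,
#   )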
| 313 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_snake_case : Dict = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_snake_case : List[str] = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
_snake_case : List[str] = "zero2"
_snake_case : Any = "zero3"
_snake_case : Dict = [ZEROa, ZEROa]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
__snake_case : Optional[Any] = parameterized.to_safe_name("_".join(str(x ) for x in param.args ) )
return F'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
_snake_case : Union[str, Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a (_lowerCAmelCase ):
"""simple docstring"""
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Union[str, Any]:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] ) -> int:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> Dict:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(lowerCamelCase , name_func=lowerCamelCase )
def __snake_case ( self : str , lowerCamelCase : str , lowerCamelCase : Any ) -> str:
self.run_and_check(
stage=lowerCamelCase , model=lowerCamelCase , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
def __snake_case ( self : str , lowerCamelCase : List[Any] ) -> Union[str, Any]:
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __snake_case ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Any = models[model]
__snake_case : Tuple = self.run_trainer(
stage=lowerCamelCase , model_name=lowerCamelCase , eval_steps=lowerCamelCase , num_train_epochs=1 , distributed=lowerCamelCase , fpaa=lowerCamelCase , )
self.do_checks(lowerCamelCase )
return output_dir
def __snake_case ( self : Optional[Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : int = 10 , lowerCamelCase : int = 1 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , ) -> Tuple:
__snake_case : Optional[int] = self.get_auto_remove_tmp_dir("./xxx" , after=lowerCamelCase )
__snake_case : Optional[int] = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(lowerCamelCase )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n '.split()
if fpaa:
args.extend(["--fp16"] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__snake_case : Optional[int] = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__snake_case : Dict = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__snake_case : Any = self.get_launcher(lowerCamelCase )
__snake_case : Optional[Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowerCamelCase , env=self.get_env() )
return output_dir
def __snake_case ( self : str , lowerCamelCase : str=False ) -> Any:
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with more gpus because we use very little data)
__snake_case : Dict = min(2 , get_gpu_count() ) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
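# For reference, the command assembled by run_trainer() above has this shape
# (paths and gpu count are illustrative):
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random \
#       --output_dir <tmp_dir> ... \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json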
| 123 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( _snake_case , _snake_case , unittest.TestCase ):
UpperCamelCase__ : int =StableDiffusionDiffEditPipeline
UpperCamelCase__ : Union[str, Any] =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCamelCase__ : Any =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCamelCase__ : List[Any] =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ : int =frozenset([] )
def __a ( self :List[Any]) -> Union[str, Any]:
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowercase , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
UpperCAmelCase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowercase , set_alpha_to_zero=_lowercase , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_lowercase)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __a ( self :Tuple , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=0) -> Any:
UpperCAmelCase_ = floats_tensor((1, 16, 16) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_lowercase)).to(_lowercase)
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :str , _lowercase :Optional[int] , _lowercase :int=0) -> List[Any]:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :Optional[int] , _lowercase :Optional[Any] , _lowercase :List[str]=0) -> Tuple:
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowercase)).to(_lowercase)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_lowercase)).convert('''RGB''')
if str(_lowercase).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_lowercase)
else:
UpperCAmelCase_ = torch.Generator(device=_lowercase).manual_seed(_lowercase)
UpperCAmelCase_ = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def __a ( self :str) -> Optional[int]:
if not hasattr(self.pipeline_class , '''_optional_components'''):
return
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowercase , _lowercase , _lowercase)
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = pipe(**_lowercase)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase)
UpperCAmelCase_ = self.pipeline_class.from_pretrained(_lowercase)
pipe_loaded.to(_lowercase)
pipe_loaded.set_progress_bar_config(disable=_lowercase)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase , _lowercase) is None , f"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ = self.get_dummy_inputs(_lowercase)
UpperCAmelCase_ = pipe_loaded(**_lowercase)[0]
UpperCAmelCase_ = np.abs(output - output_loaded).max()
self.assertLess(_lowercase , 1E-4)
def __a ( self :Dict) -> Tuple:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_mask_inputs(_lowercase)
UpperCAmelCase_ = pipe.generate_mask(**_lowercase)
UpperCAmelCase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16))
UpperCAmelCase_ = np.array([0] * 9)
UpperCAmelCase_ = np.abs(mask_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
self.assertEqual(mask[0, -3, -4] , 0)
def __a ( self :Optional[Any]) -> Optional[Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inversion_inputs(_lowercase)
UpperCAmelCase_ = pipe.invert(**_lowercase).images
UpperCAmelCase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
UpperCAmelCase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
def __a ( self :List[Any]) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=5E-3)
def __a ( self :List[Any]) -> Union[str, Any]:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ = DPMSolverMultistepScheduler(**_lowercase)
UpperCAmelCase_ = DPMSolverMultistepInverseScheduler(**_lowercase)
UpperCAmelCase_ = self.pipeline_class(**_lowercase)
pipe.to(_lowercase)
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = self.get_dummy_inversion_inputs(_lowercase)
UpperCAmelCase_ = pipe.invert(**_lowercase).images
UpperCAmelCase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3))
UpperCAmelCase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_lowercase , 1E-3)
@require_torch_gpu
@slow
class a_ ( unittest.TestCase ):
def __a ( self :Union[str, Any]) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __a ( cls :List[str]) -> Dict:
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''')
UpperCAmelCase_ = raw_image.convert('''RGB''').resize((768, 768))
UpperCAmelCase_ = raw_image
def __a ( self :str) -> int:
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = DDIMScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''a bowl of fruit'''
UpperCAmelCase_ = '''a bowl of pears'''
UpperCAmelCase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowercase , target_prompt=_lowercase , generator=_lowercase , )
UpperCAmelCase_ = pipe.invert(
prompt=_lowercase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowercase).latents
UpperCAmelCase_ = pipe(
prompt=_lowercase , mask_image=_lowercase , image_latents=_lowercase , generator=_lowercase , negative_prompt=_lowercase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
def __a ( self :Tuple) -> Any:
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_lowercase , torch_dtype=torch.floataa)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowercase)
UpperCAmelCase_ = '''a bowl of fruit'''
UpperCAmelCase_ = '''a bowl of pears'''
UpperCAmelCase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowercase , target_prompt=_lowercase , generator=_lowercase , )
UpperCAmelCase_ = pipe.invert(
prompt=_lowercase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowercase , num_inference_steps=25 , ).latents
UpperCAmelCase_ = pipe(
prompt=_lowercase , mask_image=_lowercase , image_latents=_lowercase , generator=_lowercase , negative_prompt=_lowercase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''').resize((768, 768)))
/ 255
)
assert np.abs((expected_image - image).max()) < 5E-1
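# The two slow tests above exercise the same three-stage DiffEdit workflow on
# StableDiffusionDiffEditPipeline, which is the intended public usage pattern:
#
#   mask    = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
#   latents = pipe.invert(prompt=src, image=img).latents
#   edited  = pipe(prompt=tgt, mask_image=mask, image_latents=latents).images[0]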
| 344 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
UpperCamelCase_ = False
UpperCamelCase_ = False
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
return TrainCommand(__UpperCAmelCase )
class a_ ( _snake_case ):
@staticmethod
def __a ( _lowercase :ArgumentParser) -> List[Any]:
UpperCAmelCase_ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''')
train_parser.add_argument(
'''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''')
train_parser.add_argument(
'''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''')
train_parser.add_argument(
'''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''')
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''')
train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''')
train_parser.add_argument(
'''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''')
train_parser.add_argument(
'''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''')
train_parser.add_argument(
'''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''')
train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=32 , help='''Batch size for training.''')
train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=64 , help='''Batch size for validation.''')
train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3E-5 , help='''Learning rate.''')
train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1E-0_8 , help='''Epsilon for Adam optimizer.''')
train_parser.set_defaults(func=_lowercase)
def __init__( self :Union[str, Any] , _lowercase :Namespace) -> Union[str, Any]:
UpperCAmelCase_ = logging.get_logger('''transformers-cli/training''')
UpperCAmelCase_ = '''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=_lowercase)
UpperCAmelCase_ = args.output
UpperCAmelCase_ = args.column_label
UpperCAmelCase_ = args.column_text
UpperCAmelCase_ = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
UpperCAmelCase_ = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
UpperCAmelCase_ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
UpperCAmelCase_ = args.validation_split
UpperCAmelCase_ = args.train_batch_size
UpperCAmelCase_ = args.valid_batch_size
UpperCAmelCase_ = args.learning_rate
UpperCAmelCase_ = args.adam_epsilon
def __a ( self :int) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __a ( self :Optional[Any]) -> Any:
raise NotImplementedError
def __a ( self :int) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
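# Assuming this command is wired into the usual `transformers-cli` entry point
# (the registration happens outside this file), a typical invocation would be:
#
#   transformers-cli train \
#       --train_data train.csv --validation_split 0.1 \
#       --task text_classification --model bert-base-uncased \
#       --train_batch_size 32 --learning_rate 3e-5 --output ./trained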
| 344 | 1 |
'''simple docstring'''
import cva
import numpy as np
class a__ :
"""simple docstring"""
def __init__(self , __lowercase , __lowercase ):
if k in (0.0_4, 0.0_6):
__lowerCAmelCase = k
__lowerCAmelCase = window_size
else:
raise ValueError('''invalid k value''' )
def __str__(self ):
return str(self.k )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = cva.imread(__lowercase , 0 )
__lowerCAmelCase , __lowerCAmelCase = img.shape
__lowerCAmelCase = []
__lowerCAmelCase = img.copy()
__lowerCAmelCase = cva.cvtColor(__lowercase , cva.COLOR_GRAY2RGB )
__lowerCAmelCase , __lowerCAmelCase = np.gradient(__lowercase )
__lowerCAmelCase = dx**2
__lowerCAmelCase = dy**2
__lowerCAmelCase = dx * dy
__lowerCAmelCase = 0.0_4
__lowerCAmelCase = self.window_size // 2
for y in range(__lowercase , h - offset ):
for x in range(__lowercase , w - offset ):
__lowerCAmelCase = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
__lowerCAmelCase = (wxx * wyy) - (wxy**2)
__lowerCAmelCase = wxx + wyy
__lowerCAmelCase = det - k * (trace**2)
# threshold on the Harris response - can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 2_55 )
return color_img, corner_list
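# The response computed above is the standard Harris corner measure
#   R = det(M) - k * trace(M)**2,  M = [[wxx, wxy], [wxy, wyy]]
# over the window: R > 0 flags a corner (both eigenvalues large), R < 0 an
# edge, and |R| ~ 0 a flat patch. Worked example with wxx = wyy = 10,
# wxy = 0, k = 0.04: R = 100 - 0.04 * 400 = 84 > 0, i.e. a corner.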
if __name__ == "__main__":
_UpperCAmelCase : Dict = HarrisCorner(0.04, 3)
_UpperCAmelCase ,_UpperCAmelCase : Union[str, Any] = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 174 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline
__UpperCamelCase : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
__UpperCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCamelCase : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _snake_case (self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__lowercase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCAmelCase = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , )
__lowerCAmelCase = CLIPTextModel(__lowercase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowercase )
__lowerCAmelCase = CLIPTextModelWithProjection(__lowercase )
__lowerCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__lowercase )
__lowerCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def _snake_case (self , __lowercase , __lowercase=0 ):
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowerCAmelCase = image / 2 + 0.5
if str(__lowercase ).startswith('''mps''' ):
__lowerCAmelCase = torch.manual_seed(__lowercase )
else:
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.7_5,
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = sd_pipe(**__lowercase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case (self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _snake_case (self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _snake_case (self ):
pass
def _snake_case (self ):
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
__lowerCAmelCase = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
# forward without prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 3 * ['''this is a negative prompt''']
__lowerCAmelCase = negative_prompt
__lowerCAmelCase = 3 * [inputs['''prompt''']]
__lowerCAmelCase = sd_pipe(**__lowercase )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCAmelCase = self.get_dummy_inputs(__lowercase )
__lowerCAmelCase = 3 * ['''this is a negative prompt''']
__lowerCAmelCase = 3 * [inputs.pop('''prompt''' )]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = sd_pipe.encode_prompt(__lowercase , negative_prompt=__lowercase )
__lowerCAmelCase = sd_pipe(
**__lowercase , prompt_embeds=__lowercase , negative_prompt_embeds=__lowercase , pooled_prompt_embeds=__lowercase , negative_pooled_prompt_embeds=__lowercase , )
__lowerCAmelCase = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
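# The equality check above relies on encode_prompt returning, in order,
# (prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds,
# negative_pooled_prompt_embeds); feeding these back through the *_embeds
# kwargs must reproduce the plain-prompt image to within 1e-4.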
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case (self , __lowercase , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=0 ):
__lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowerCAmelCase = np.random.RandomState(__lowercase ).standard_normal((1, 4, 64, 64) )
__lowerCAmelCase = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__lowerCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case (self ):
__lowerCAmelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
__lowerCAmelCase = self.get_inputs(__lowercase )
__lowerCAmelCase = pipe(**__lowercase ).images
__lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 174 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowerCamelCase : Optional[int] = get_tests_dir("fixtures/dummy-config.json")
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : Any ):
UpperCAmelCase : Optional[int] = 0
def __magic_name__ ( self : List[Any] ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) )
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : Dict = AutoConfig.from_pretrained('''bert-base-uncased''' )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : Any ):
UpperCAmelCase : List[str] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : Union[str, Any] ):
UpperCAmelCase : Dict = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : str ):
UpperCAmelCase : Any = AutoConfig.for_model('''roberta''' )
self.assertIsInstance(__A, __A )
def __magic_name__ ( self : str ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
UpperCAmelCase : List[str] = os.path.join(__A, '''fake-roberta''' )
os.makedirs(__A, exist_ok=__A )
with open(os.path.join(__A, '''config.json''' ), '''w''' ) as f:
f.write(json.dumps({} ) )
UpperCAmelCase : Tuple = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ), __A )
def __magic_name__ ( self : Optional[int] ):
try:
AutoConfig.register('''custom''', __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register('''model''', __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register('''bert''', __A )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A, __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __magic_name__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__A, '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase : Any = AutoConfig.from_pretrained('''bert-base''' )
def __magic_name__ ( self : List[Any] ):
with self.assertRaisesRegex(
__A, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase : Any = AutoConfig.from_pretrained(__A, revision='''aaaaaa''' )
def __magic_name__ ( self : int ):
with self.assertRaisesRegex(
__A, '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''', ):
UpperCAmelCase : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' )
def __magic_name__ ( self : str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
UpperCAmelCase : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
UpperCAmelCase : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=__A )
UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=__A )
self.assertEqual(config.__class__.__name__, '''NewModelConfig''' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
UpperCAmelCase : Any = AutoConfig.from_pretrained(__A, trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__, '''NewModelConfig''' )
def __magic_name__ ( self : Optional[Any] ):
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = """new-model"""
try:
AutoConfig.register('''new-model''', __A )
# If remote code is not set, the default is to use local
UpperCAmelCase : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' )
self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''' )
# If remote code is disabled, we load the local one.
UpperCAmelCase : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=__A )
self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''' )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=__A )
self.assertEqual(config.__class__.__name__, '''NewModelConfig''' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
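# The same registration pattern outside of tests (CustomConfig stands for any
# PretrainedConfig subclass with its own unique `model_type`):
#
#   from transformers import AutoConfig
#   AutoConfig.register("custom", CustomConfig)
#   config = AutoConfig.for_model("custom")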
| 360 |
def a__ ( UpperCAmelCase : int ) -> bool:
"""
Return True if `number` is automorphic, i.e. its square ends with the number itself.

>>> a__(5)
True
>>> a__(25)
True
>>> a__(7)
False
"""
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
UpperCAmelCase : List[str] = f'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCAmelCase )
if number < 0:
return False
UpperCAmelCase : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
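# Usage sketch for the pipeline defined below (this matches diffusers'
# DDIMPipeline; the checkpoint id is an illustrative, DDPM-trained one):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]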
class __A( a ):
def __init__( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__a = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_snake_case , scheduler=_snake_case )
@torch.no_grad()
def __call__( self , _snake_case = 1 , _snake_case = None , _snake_case = 0.0 , _snake_case = 50 , _snake_case = None , _snake_case = "pil" , _snake_case = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
if isinstance(self.unet.config.sample_size , _snake_case ):
__a = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__a = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(_snake_case , _snake_case ) and len(_snake_case ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_snake_case )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__a = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__a = self.unet(_snake_case , _snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__a = self.scheduler.step(
_snake_case , _snake_case , _snake_case , eta=_snake_case , use_clipped_model_output=_snake_case , generator=_snake_case ).prev_sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 6 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = R'\w+[.]\d+'
A_ : int = re.findall(_UpperCAmelCase , _UpperCAmelCase )
for pat in pats:
A_ : Optional[int] = key.replace(_UpperCAmelCase , '_'.join(pat.split('.' ) ) )
return key
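# Example of the renaming above: the regex matches "<name>.<index>" segments
# and joins them with "_" (the key is illustrative):
#   "encoder.layers.0.attention.weight" -> "encoder.layers_0.attention.weight"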
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ : List[str] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ : Optional[Any] = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ : Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A_ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ : Tuple = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ : Optional[int] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
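# Note on the conv branch above: PyTorch stores conv kernels as
# (out_ch, in_ch, h, w) while Flax expects (h, w, in_ch, out_ch), hence
# transpose(2, 3, 1, 0); linear weights only need the plain transpose (.T).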
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=42 ):
"""simple docstring"""
# Step 1: Convert pytorch tensor to numpy
A_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ : Union[str, Any] = flax_model.init_weights(PRNGKey(_UpperCAmelCase ) )
A_ : Optional[Any] = flatten_dict(_UpperCAmelCase )
A_ : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ : Any = rename_key(_UpperCAmelCase )
A_ : List[str] = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A_ , A_ : Union[str, Any] = rename_key_and_reshape_tensor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
A_ : str = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase )
| 286 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """BlipImageProcessor"""
    tokenizer_class = """AutoTokenizer"""

    def __init__( self , image_processor , tokenizer , qformer_tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """simple docstring"""
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text." )
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding.update(text_encoding )
            qformer_text_encoding = self.qformer_tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids" )
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask" )
        if images is not None:
            image_encoding = self.image_processor(images , return_tensors=return_tensors )
            encoding.update(image_encoding )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def save_pretrained( self , save_directory , **kwargs ):
        """simple docstring"""
        if os.path.isfile(save_directory ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(save_directory , exist_ok=True )
        qformer_tokenizer_path = os.path.join(save_directory , "qformer_tokenizer" )
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path )
        return super().save_pretrained(save_directory , **kwargs )

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="qformer_tokenizer" )
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs )
        args.append(qformer_tokenizer )
        return cls(*args )
| 102 |
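A minimal usage sketch for the two-tokenizer processor above. The checkpoint name is an assumption for illustration; any InstructBLIP-style checkpoint that ships a `qformer_tokenizer` subfolder would do:

# Hypothetical checkpoint name, shown only to illustrate the call pattern.
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
# inputs = processor(images=image, text="What is in this picture?", return_tensors="pt")
# `inputs` then carries pixel_values from the image processor, input_ids/attention_mask
# from the main tokenizer, and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.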
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a__ :
def __init__( self , _A , _A=1_3 , _A=3_0 , _A=2 , _A=3 , _A=True , _A=True , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=1_0 , _A=0.02 , _A=3 , _A=None , _A=2 , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = num_patches + 2
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModel(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTForMaskedImageModeling(config=_A )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForMaskedImageModeling(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.type_sequence_label_size
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase = 1
__lowerCAmelCase = TFDeiTForImageClassification(_A )
__lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
_a : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
_a : Optional[Any] = (
{
"""feature-extraction""": TFDeiTModel,
"""image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
_a : str = False
_a : str = False
_a : List[str] = False
_a : Optional[int] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A=False ):
"""simple docstring"""
__lowerCAmelCase = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFDeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def _a ( ):
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=_A , return_tensors="tf" )
# forward pass
__lowerCAmelCase = model(**_A )
# verify the logits
__lowerCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
__lowerCAmelCase = tf.constant([-1.02_66, 0.19_12, -1.28_61] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
| 102 | 1 |
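The sequence-length arithmetic in the DeiT tester above, (image_size // patch_size) ** 2 patches plus the [CLS] and distillation tokens, can be checked directly; the numbers mirror the tester defaults:

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 225 patches
seq_length = num_patches + 2  # + [CLS] and distillation tokens
assert seq_length == 227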
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
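A short usage sketch for the flag parsing above (the RUN_SLOW key comes from the line that follows; setting the variable in-process is just for illustration):

import os
os.environ['RUN_SLOW'] = 'yes'
assert parse_flag_from_env('RUN_SLOW', default=False)  # strtobool maps 'yes' to 1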
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def require_torch_min_version( test_case=None , version=None ):
    '''simple docstring'''
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F"test requires torch version >= {version}" )(test_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class TempDirTestCase(unittest.TestCase ):
    '''simple docstring'''
    clear_on_setup = True

    @classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass( cls ):
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )

    def setUp( self ):
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('**/*' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    '''simple docstring'''
    def add_mocks( self , mocks ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors( tensor ):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    '''simple docstring'''
    def __init__( self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    '''simple docstring'''
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee( line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
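A hedged usage sketch for the async runner above (the command is illustrative):

# result = execute_subprocess_async(['python', '-c', 'print("ok")'])
# result.stdout and result.stderr are lists of decoded lines captured by `tee` above;
# a nonzero return code raises RuntimeError with the combined stderr.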
class SubprocessCallException(Exception ):
    '''simple docstring'''
    pass
def run_command( command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__ :
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , head_mask=a )
__lowerCamelCase = model(a , token_type_ids=a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTDoubleHeadsModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = OpenAIGPTForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ):
"""simple docstring"""
__lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , )
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = OpenAIGPTModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(a )
__lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is
__lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCamelCase = model.generate(a , do_sample=a )
self.assertListEqual(output_ids[0].tolist() , a )
| 67 | 0 |
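For reference, the do_sample=False path exercised in the slow test above is greedy decoding; a minimal sketch of the equivalent loop (assuming a loaded model and input_ids, and ignoring the KV caching and stopping criteria that generate() adds):

import torch

def greedy_decode(model, input_ids, max_new_tokens=16):
    # Repeatedly append the argmax token; model.generate(..., do_sample=False) does this with caching.
    for _ in range(max_new_tokens):
        logits = model(input_ids).logits
        next_token = logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return input_ids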
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : int=32 , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Tuple=[10, 20, 30, 40] , UpperCAmelCase_ : str=[2, 2, 3, 2] , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : List[str]=10 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : str=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : List[Any]=None , ) ->Any:
'''simple docstring'''
lowerCamelCase__: str =parent
lowerCamelCase__: Tuple =batch_size
lowerCamelCase__: List[Any] =image_size
lowerCamelCase__: Optional[Any] =num_channels
lowerCamelCase__: Tuple =num_stages
lowerCamelCase__: Optional[Any] =hidden_sizes
lowerCamelCase__: List[Any] =depths
lowerCamelCase__: Optional[Any] =is_training
lowerCamelCase__: List[Any] =use_labels
lowerCamelCase__: Optional[int] =intermediate_size
lowerCamelCase__: Any =hidden_act
lowerCamelCase__: Dict =type_sequence_label_size
lowerCamelCase__: List[Any] =initializer_range
lowerCamelCase__: Union[str, Any] =out_features
lowerCamelCase__: Union[str, Any] =num_labels
lowerCamelCase__: List[str] =scope
lowerCamelCase__: Tuple =num_stages
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Dict =None
if self.use_labels:
lowerCamelCase__: int =ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCamelCase__: Optional[int] =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase_ , loss_ignore_index=255 , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =UperNetForSemanticSegmentation(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size))
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config , pixel_values , labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase_ = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: int =UperNetModelTester(self)
lowerCamelCase__: Optional[Any] =ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->int:
'''simple docstring'''
return
    def test_forward_signature(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE_ (self : Dict) ->int:
'''simple docstring'''
lowerCamelCase__: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_)
@unittest.skip(reason="UperNet does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings")
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model")
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model")
def SCREAMING_SNAKE_CASE_ (self : Any) ->Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Any:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->str:
'''simple docstring'''
pass
    def test_hidden_states_output(self ):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization(self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights")
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->int:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: Dict =UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =hf_hub_download(
repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
lowerCamelCase__: Dict =Image.open(__a ).convert("RGB" )
return image
@require_torch
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: str =AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
lowerCamelCase__: Optional[Any] =UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =prepare_img()
lowerCamelCase__: Tuple =processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Tuple =model(**UpperCAmelCase_)
lowerCamelCase__: Dict =torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: Any =torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=1E-4))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
lowerCamelCase__: List[Any] =UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =prepare_img()
lowerCamelCase__: List[str] =processor(images=UpperCAmelCase_ , return_tensors="pt").to(UpperCAmelCase_)
with torch.no_grad():
lowerCamelCase__: Optional[Any] =model(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =torch.Size((1, model.config.num_labels, 512, 512))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
lowerCamelCase__: List[Any] =torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=1E-4))
| 273 |
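To turn the (1, num_labels, 512, 512) logits checked above into a per-pixel label map, take the argmax over the class dimension; a minimal sketch with a placeholder tensor (150 classes is an assumption matching ADE20K-style checkpoints):

import torch
logits = torch.zeros(1, 150, 512, 512)   # placeholder with the tested spatial shape
segmentation_map = logits.argmax(dim=1)  # -> (batch, height, width) class ids
assert segmentation_map.shape == (1, 512, 512)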
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["torch", "torchsde"]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple) ->Any:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int]) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Tuple , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"])
| 273 | 1 |
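The dummy-object pattern above lets the package import cleanly without its optional backends and fail only when the object is actually used; a minimal self-contained sketch of the same idea (names here are illustrative, not the library internals):

class _MissingBackendProxy:
    """Stands in for a class whose backends are missing; raises only on instantiation."""

    def __init__(self, *args, **kwargs):
        raise ImportError('This object requires `torch` and `torchsde` to be installed.')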
"""simple docstring"""
from collections import namedtuple
_a = namedtuple('from_to', 'from_ to')
_a = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def volume_conversion( value, from_type, to_type ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
            + ", ".join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ", ".join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
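A worked check of the conversion above: 2 cubic metres in litres multiplies by cubicmeter.from_ (1) and litre.to (1000):

assert volume_conversion(2, 'cubicmeter', 'litre') == 2000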
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ) -> Optional[Any]:
_a : Any =tmp_path / """cache"""
_a : int ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : Tuple =SqlDatasetReader(
"""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ).read()
_check_sql_dataset(_UpperCAmelCase ,_UpperCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] ,)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ) -> List[Any]:
_a : Union[str, Any] =tmp_path / """cache"""
_a : str ={"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_a : Optional[int] =features.copy() if features else default_expected_features
_a : Union[str, Any] =(
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : Optional[Any] =SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ).read()
_check_sql_dataset(_UpperCAmelCase ,_UpperCAmelCase )
def iter_sql_file( sqlite_path ):
    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""" )
        for row in cur:
            yield row
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ) -> Union[str, Any]:
_a : Union[str, Any] =tmp_path / """cache"""
_a : Union[str, Any] =os.path.join(_UpperCAmelCase ,"""tmp.sql""" )
_a : Tuple =SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_UpperCAmelCase ).read()
SqlDatasetWriter(_UpperCAmelCase ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=1 ).write()
_a : Tuple =iter_sql_file(_UpperCAmelCase )
_a : List[Any] =iter_sql_file(_UpperCAmelCase )
for rowa, rowa in zip(_UpperCAmelCase ,_UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Any ,_UpperCAmelCase : List[Any] ) -> Optional[int]:
_a : int =tmp_path / """cache"""
_a : Any =os.path.join(_UpperCAmelCase ,"""tmp.sql""" )
_a : Union[str, Any] =SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_UpperCAmelCase ).read()
SqlDatasetWriter(_UpperCAmelCase ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=2 ).write()
_a : List[Any] =iter_sql_file(_UpperCAmelCase )
_a : str =iter_sql_file(_UpperCAmelCase )
for rowa, rowa in zip(_UpperCAmelCase ,_UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ) -> List[str]:
_a : List[str] =tmp_path / """cache"""
_a : Dict =os.path.join(_UpperCAmelCase ,"""tmp.sql""" )
_a : Optional[Any] =SqlDatasetReader("""dataset""" ,"""sqlite:///""" + sqlite_path ,cache_dir=_UpperCAmelCase ).read()
with pytest.raises(_UpperCAmelCase ):
SqlDatasetWriter(_UpperCAmelCase ,"""dataset""" ,"""sqlite:///""" + output_sqlite_path ,num_proc=0 ).write()
| 276 | 0 |
def multiplicative_persistence( num ) -> int:
    if not isinstance(num , int ):
        raise ValueError('multiplicative_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps


def additive_persistence( num ) -> int:
    if not isinstance(num , int ):
        raise ValueError('additive_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 113 |
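A worked example for the two functions above: 39 -> 27 -> 14 -> 4 takes three multiplicative steps, while 39 -> 12 -> 3 takes two additive steps:

assert multiplicative_persistence(39) == 3  # 3*9=27, 2*7=14, 1*4=4
assert additive_persistence(39) == 2  # 3+9=12, 1+2=3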
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a ={
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 113 | 1 |
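The lazy-module pattern above defers the heavy framework imports until an attribute is first accessed; a minimal sketch of the same idea with importlib (independent of the transformers _LazyModule internals):

import importlib

class LazyNamespace:
    """Resolve attributes to submodule members on first access instead of at import time."""

    def __init__(self, package, attr_to_module):
        self._package = package
        self._attr_to_module = attr_to_module  # e.g. {'Mask2FormerModel': '.modeling_mask2former'}

    def __getattr__(self, name):
        module = importlib.import_module(self._attr_to_module[name], self._package)
        return getattr(module, name)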
'''simple docstring'''
from __future__ import annotations
def ceil_index( v , l , r , key ) -> int:  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length( v ) -> int:
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
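A quick check of the O(n log n) algorithm above on a classic input; one longest increasing subsequence is 2, 3, 7, 8, 10, 13:

assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6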
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : List[str] = "codegen"
UpperCAmelCase__ : str = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self, vocab_size=5_0400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, tie_word_embeddings=False, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs )
class lowerCAmelCase_ ( a__ ):
    def __init__( self, config, task = "default", patching_specs = None, use_past = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past )
        if not getattr(self._config, 'pad_token_id', None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers( self ) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head
    def generate_dummy_inputs( self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self ).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )], dim=1 )
return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
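A minimal usage sketch for the config class above (assuming a standard `transformers` installation, where `CodeGenConfig` and `CodeGenModel` ship as public classes):

```python
from transformers import CodeGenConfig, CodeGenModel

# Deliberately tiny config; attribute_map lets generic names resolve to the
# CodeGen-specific ones (num_hidden_layers -> n_layer, hidden_size -> n_embd, ...).
config = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
model = CodeGenModel(config)
print(model.config.num_hidden_layers)  # 2, resolved through attribute_map
```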
| 119 | 0 |
from torch import nn
def get_activation(act_fn):
    """Return the torch activation module matching the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
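A quick sanity check of the helper (PyTorch required; `get_activation` is the descriptive name used in the fixed version above):

```python
import torch

act = get_activation("mish")
x = torch.linspace(-2.0, 2.0, steps=5)
print(act(x))  # Mish applied elementwise; an unknown name raises ValueError
```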
| 138 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
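For intuition, `shift_tokens_right` builds the decoder inputs by shifting the labels one position to the right and prepending the decoder start token; a toy illustration (the `0` start/pad ids below mirror mT5's defaults):

```python
import numpy as np

labels = np.array([[5, 6, 7]])
decoder_inputs = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
print(decoder_inputs)  # [[0 5 6]] -- the last label token is dropped
```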
| 138 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
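The script is CLI-driven, but the dataclass can also be filled in directly; a hypothetical programmatic invocation (the CSV file name is illustrative, and the file must carry the `model`, `batch_size`, `sequence_length`, and `result` columns the loader above expects):

```python
args = PlotArguments(csv_file="inference_memory.csv", figure_png_file="inference_memory.png")
Plot(args=args).plot()
```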
| 62 |
"""Tests for the Wav2Vec2 phoneme CTC tokenizer."""
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")
    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)
    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"

        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)
    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
    def test_added_tokens_do_lower_case(self):
        pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
    def test_encode_decode_with_spaces(self):
        pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
    def test_internal_consistency(self):
        pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
    def test_tf_encode_plus_sent_to_model(self):
        pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
    def test_torch_encode_plus_sent_to_model(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
| 319 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : int = 2_00_00_00 ) -> int:
'''simple docstring'''
__snake_case : int = [0 for i in range(n + 1 )]
__snake_case : int = 1
__snake_case : str = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , UpperCAmelCase_ ):
__snake_case : Optional[int] = 1
__snake_case : str = 0
for i in range(UpperCAmelCase_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''')
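A quick hand check against the fixed sieve above: the primes below ten are 2, 3, 5, and 7, so the result should be 17.

```python
assert solution(10) == 2 + 3 + 5 + 7  # 17
```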
| 95 | """simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 95 | 1 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
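A small, hypothetical helper showing how such aliases are typically consumed (`normalize_paths` is illustrative only and relies on the imports above):

```python
def normalize_paths(paths: NestedDataStructureLike[PathLike]) -> List[str]:
    """Flatten a single path, a list/tuple of paths, or a dict of paths into strings."""
    if isinstance(paths, dict):
        return [str(p) for p in paths.values()]
    if isinstance(paths, (list, tuple)):
        return [str(p) for p in paths]
    return [str(paths)]
```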
| 144 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Repeatedly combine minterms and collect the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Check whether two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart of prime implicants against minterms."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
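A worked check of `compare_string` as fixed above: two minterms that differ in exactly one bit merge into a single implicant, with `_` marking the eliminated variable.

```python
print(compare_string("0110", "0111"))  # '011_'
print(compare_string("0110", "1001"))  # False (differs in more than one position)
```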
| 64 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase):
@cached_property
def UpperCAmelCase__( self : Union[str, Any] )-> Dict:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__( self : List[str] )-> Any:
lowerCAmelCase__ : List[Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = self.default_image_processor
lowerCAmelCase__ : Optional[int] = prepare_img()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : int = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
lowerCAmelCase__ : Dict = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
@require_accelerate
def UpperCAmelCase__( self : str )-> Tuple:
lowerCAmelCase__ : Optional[Any] = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
lowerCAmelCase__ : List[str] = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
lowerCAmelCase__ : str = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase__ : Union[str, Any] = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
| 366 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
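A minimal sketch instantiating the config (both classes ship with `transformers`; the `projection_dim` value is arbitrary):

```python
from transformers import DPRConfig, DPRQuestionEncoder

config = DPRConfig(projection_dim=128)  # add a 128-d projection on top of the pooled output
encoder = DPRQuestionEncoder(config)
print(encoder.config.hidden_size)  # 768
```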
| 211 | 0 |
"""Tests for the zero-shot image classification pipeline."""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # five identical sub-lists, written with a multiplier
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # five identical sub-lists, written with a multiplier
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
def lowercase__ ( self : Dict ) -> Tuple:
_lowerCAmelCase = pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""" )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase = image_classifier(__snake_case , candidate_labels=["""cat""", """plane""", """remote"""] )
self.assertEqual(
nested_simplify(__snake_case ) , [
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
] , )
_lowerCAmelCase = image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2 )
self.assertEqual(
nested_simplify(__snake_case ) , [
[
{"""score""": 0.5_11, """label""": """remote"""},
{"""score""": 0.4_85, """label""": """cat"""},
{"""score""": 0.0_04, """label""": """plane"""},
],
]
* 5 , )
| 70 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''
    def __init__( self : int , image_processor : Tuple=None , tokenizer : Optional[int]=None , **kwargs : Tuple ) -> List[Any]:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self : Dict , *args : Optional[int] , **kwargs : Union[str, Any] ) -> Tuple:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop("""images""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
def lowercase__ ( self : List[Any] , *__snake_case : Dict , **__snake_case : List[str] ) -> int:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowercase__ ( self : int , *__snake_case : Tuple , **__snake_case : Optional[Any] ) -> Any:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@contextmanager
def lowercase__ ( self : int ) -> Optional[Any]:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson( self : Optional[int] , tokens : Union[str, Any] , is_inner_value : List[Any]=False , added_vocab : Dict=None ) -> Tuple:
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(R"""<s_(.*?)>""" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(Rf"</s_{key}>" , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(R"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self : Optional[int] ) -> Union[str, Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
return self.image_processor_class
@property
def lowercase__ ( self : List[Any] ) -> Any:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
return self.image_processor
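# Editor illustration (not part of the original file): tokenajson above converts
# Donut-style tag sequences into nested dicts. Given a hypothetical processor `proc`:
#     proc.tokenajson("<s_menu><s_name>latte</s_name><s_price>5</s_price></s_menu>")
#     # -> {"menu": {"name": "latte", "price": "5"}}
#     proc.tokenajson("<s_items>a<sep/>b</s_items>")
#     # -> {"items": ["a", "b"]}
# Plain text containing no <s_...> tags falls through to {"text_sequence": "..."}.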
| 70 | 1 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour :
    def __init__( self , img , dst_width , dst_height ):
        """simple docstring"""
        if dst_width < 1 or dst_height < 1:
            raise ValueError('Destination width/height should be > 0' )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 2_55
        )
    def process( self ):
        """simple docstring"""
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]
    def get_x( self , x ):
        """simple docstring"""
        return int(self.ratio_x * x )
    def get_y( self , y ):
        """simple docstring"""
        return int(self.ratio_y * y )
if __name__ == "__main__":
    dst_w , dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
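# Vectorised alternative (editor sketch, not from the original file): NumPy fancy
# indexing performs the same nearest-neighbour gather without Python-level loops.
def nearest_neighbour_fast(img, dst_w, dst_h):
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)  # source row for each output row
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)  # source column for each output column
    return img[ys[:, None], xs[None, :]]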
| 160 |
'''simple docstring'''
def solution ( ) -> int:
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
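# Known-result check (editor note): Project Euler 9's unique triplet with
# a + b + c = 1_000 is (200, 375, 425), so:
#     >>> solution()
#     31875000
# The quadratic scan above can also be reduced to a single loop: for a fixed `a`,
# the constraint a^2 + b^2 = (1_000 - a - b)^2 forces
#     b = (1_000_000 - 2_000 * a) / (2_000 - 2 * a)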
| 160 | 1 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCAmelCase ( ProcessorMixin ):
    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _snake_case , _snake_case ):
"""simple docstring"""
super().__init__(_snake_case , _snake_case )
    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        if text is None and audios is None:
            raise ValueError("""You have to specify either text or audios. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )
        if text is not None and audios is not None:
            encoding["""input_features"""] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )
def snake_case ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case )
def snake_case ( self , *_snake_case , **_snake_case ):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case )
@property
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
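# Usage sketch (editor illustration; the checkpoint name "laion/clap-htsat-unfused" is
# an assumption, not taken from this file). With `proc` an instance of the processor
# above and `waveform` a 1-D float array at the extractor's sampling rate:
#     batch = proc(text=["a dog barking"], audios=waveform, sampling_rate=48_000,
#                  return_tensors="pt")
#     # -> tokenizer outputs (input_ids, attention_mask) plus the feature extractor's
#     #    input_features, merged exactly as in __call__ above.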
| 82 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCamelCase : Union[str, Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
a = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
a = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
else:
a = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca ( __lowerCamelCase , __lowerCamelCase ) -> int:
a = []
a = fairseq_model.state_dict()
a = hf_model.feature_extractor
a = hf_model.adapter
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
a = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
a = True
if "*" in mapped_key:
a = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
a = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
a = """weight_g"""
elif "weight_v" in name:
a = """weight_v"""
elif "bias" in name:
a = """bias"""
elif "weight" in name:
a = """weight"""
else:
a = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
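# Worked example of the "*" substitution above (editor note, not part of the script):
# a fairseq key such as "encoder.layers.3.self_attn.k_proj.weight" matches the MAPPING
# entry "self_attn.k_proj" -> "encoder.layers.*.attention.k_proj"; the layer index is
# recovered from the fairseq name and spliced in place of "*":
#     name = "encoder.layers.3.self_attn.k_proj.weight"
#     layer_index = name.split("self_attn.k_proj")[0].split(".")[-2]   # -> "3"
#     hf_key = "encoder.layers.*.attention.k_proj".replace("*", layer_index)
#     # -> "encoder.layers.3.attention.k_proj"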
def load_conv_layer ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
a = full_name.split("""conv_layers.""" )[-1]
a = name.split(""".""" )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
def load_adapter ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
a = full_name.split("""adaptor.""" )[-1]
a = name.split(""".""" )
if items[1].isdigit():
a = int(items[1] )
else:
a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
a = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
a = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
a = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
a = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
a = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
def make_linear_from_emb ( emb ) -> Tuple:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
a = WavaVecaConfig.from_pretrained(
__lowerCamelCase , add_adapter=__lowerCamelCase , adapter_stride=__lowerCamelCase , adapter_kernel_size=__lowerCamelCase , use_auth_token=__lowerCamelCase , output_hidden_size=__lowerCamelCase , )
a = MBartConfig.from_pretrained(__lowerCamelCase )
# load model
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
a = model[0].eval()
# load feature extractor
a = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase , use_auth_token=__lowerCamelCase )
# set weights for wav2vec2 encoder
a = WavaVecaModel(__lowerCamelCase )
recursively_load_weights_wavaveca(model.encoder , __lowerCamelCase )
# load decoder weights
a = MBartForCausalLM(__lowerCamelCase )
a , a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__lowerCamelCase )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
a = SpeechEncoderDecoderModel(encoder=__lowerCamelCase , decoder=__lowerCamelCase )
a = False
a = MBartaaTokenizer(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
a = hf_wavavec.config.to_dict()
a = tokenizer.pad_token_id
a = tokenizer.bos_token_id
a = tokenizer.eos_token_id
a = """mbart50"""
a = """wav2vec2"""
a = tokenizer.eos_token_id
a = 25_0004
a = tokenizer.eos_token_id
a = SpeechEncoderDecoderConfig.from_dict(__lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
feature_extractor.save_pretrained(__lowerCamelCase )
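# Example invocation (editor sketch; the script name and all paths are placeholders,
# not taken from this file):
#     python convert_wav2vec2_mbart_checkpoint.py \
#         --checkpoint_path ./checkpoint_best.pt \
#         --dict_path ./dict.mbart50.txt \
#         --config_yaml_path ./config.yaml \
#         --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50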
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
__UpperCamelCase : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 228 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=4 , ):
'''simple docstring'''
lowercase__ : Any= parent
lowercase__ : Tuple= batch_size
lowercase__ : str= seq_length
lowercase__ : Tuple= is_training
lowercase__ : Dict= use_attention_mask
lowercase__ : str= use_token_type_ids
lowercase__ : Optional[Any]= use_labels
lowercase__ : List[str]= vocab_size
lowercase__ : Dict= hidden_size
lowercase__ : Any= num_hidden_layers
lowercase__ : Optional[int]= num_attention_heads
lowercase__ : str= intermediate_size
lowercase__ : Any= hidden_act
lowercase__ : Dict= hidden_dropout_prob
lowercase__ : Dict= attention_probs_dropout_prob
lowercase__ : str= max_position_embeddings
lowercase__ : Union[str, Any]= type_vocab_size
lowercase__ : Optional[Any]= type_sequence_label_size
lowercase__ : str= initializer_range
lowercase__ : int= num_choices
    def prepare_config_and_inputs( self ):
'''simple docstring'''
lowercase__ : Any= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[str]= None
if self.use_attention_mask:
lowercase__ : Optional[int]= random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[Any]= None
if self.use_token_type_ids:
lowercase__ : Optional[int]= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Any= AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Dict= self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__, lowercase__ : int= config_and_inputs
lowercase__ : List[str]= {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class __UpperCAmelCase( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp( self ):
'''simple docstring'''
lowercase__ : Any= FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowercase__ : Dict= model_class_name.from_pretrained("albert-base-v2" )
lowercase__ : Tuple= model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
@require_flax
class __UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self ):
'''simple docstring'''
lowercase__ : Any= FlaxAlbertModel.from_pretrained("albert-base-v2" )
lowercase__ : List[str]= np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ : Any= np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : Any= model(snake_case__ , attention_mask=snake_case__ )[0]
lowercase__ : str= (1, 11, 768)
self.assertEqual(output.shape , snake_case__ )
lowercase__ : Any= np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
| 150 |
"""simple docstring"""
from __future__ import annotations
class Node:
"""simple docstring"""
def __init__( self , snake_case__ ):
'''simple docstring'''
lowercase__ : List[Any]= data
lowercase__ : Node | None= None
lowercase__ : Node | None= None
def display(tree: Node | None ) ->None: # In Order traversal of the tree
    """simple docstring"""
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree(tree: Node | None ) ->int:
    """simple docstring"""
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree: Node | None ) ->bool:
    """simple docstring"""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main() ->None: # Main function for testing.
    """simple docstring"""
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
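# Editor note: with the tree built in main(), is_full_binary_tree prints False (nodes
# 3, 5 and 8 each have exactly one child) and depth_of_tree prints 5, the number of
# nodes on the longest path 1 -> 3 -> 7 -> 8 -> 9.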
| 150 | 1 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ) -> Optional[int]:
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
A_ : Tuple = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowerCamelCase , set_alpha_to_zero=_lowerCamelCase , )
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A_ : Any = CLIPTextModel(_lowerCamelCase )
A_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , _lowerCamelCase , _lowerCamelCase=0 ) -> List[Any]:
A_ : Union[str, Any] = floats_tensor((1, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
A_ : Union[str, Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith("""mps""" ):
A_ : Tuple = torch.manual_seed(_lowerCamelCase )
else:
A_ : int = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
A_ : Any = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def get_dummy_mask_inputs( self , _lowerCamelCase , _lowerCamelCase=0 ) -> Tuple:
A_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
A_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : Any = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert("""RGB""" )
if str(_lowerCamelCase ).startswith("""mps""" ):
A_ : Any = torch.manual_seed(_lowerCamelCase )
else:
A_ : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
A_ : Optional[int] = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def get_dummy_inversion_inputs( self , _lowerCamelCase , _lowerCamelCase=0 ) -> List[str]:
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
A_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ : Optional[int] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert("""RGB""" )
if str(_lowerCamelCase ).startswith("""mps""" ):
A_ : str = torch.manual_seed(_lowerCamelCase )
else:
A_ : Optional[int] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
A_ : Tuple = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
A_ : Tuple = self.get_dummy_components()
A_ : Optional[Any] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
A_ : str = self.get_dummy_inputs(_lowerCamelCase )
A_ : int = pipe(**_lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowerCamelCase )
A_ : int = self.pipeline_class.from_pretrained(_lowerCamelCase )
pipe_loaded.to(_lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=_lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowerCamelCase , _lowerCamelCase ) is None , F"`{optional_component}` did not stay set to None after loading." , )
A_ : List[str] = self.get_dummy_inputs(_lowerCamelCase )
A_ : Tuple = pipe_loaded(**_lowerCamelCase )[0]
A_ : Optional[int] = np.abs(output - output_loaded ).max()
self.assertLess(_lowerCamelCase , 1e-4 )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[Any] = """cpu"""
A_ : int = self.get_dummy_components()
A_ : Optional[int] = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : List[Any] = self.get_dummy_mask_inputs(_lowerCamelCase )
A_ : Optional[int] = pipe.generate_mask(**_lowerCamelCase )
A_ : Optional[int] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
A_ : Tuple = np.array([0] * 9 )
A_ : int = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase_ ( self ) -> Dict:
A_ : Union[str, Any] = """cpu"""
A_ : Optional[Any] = self.get_dummy_components()
A_ : str = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Optional[int] = self.get_dummy_inversion_inputs(_lowerCamelCase )
A_ : Optional[Any] = pipe.invert(**_lowerCamelCase ).images
A_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
A_ : str = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
A_ : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
def UpperCAmelCase_ ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Any = """cpu"""
A_ : Union[str, Any] = self.get_dummy_components()
A_ : Union[str, Any] = {"""beta_start""": 0.0_0085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
A_ : Optional[Any] = DPMSolverMultistepScheduler(**_lowerCamelCase )
A_ : List[Any] = DPMSolverMultistepInverseScheduler(**_lowerCamelCase )
A_ : Tuple = self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Union[str, Any] = self.get_dummy_inversion_inputs(_lowerCamelCase )
A_ : List[Any] = pipe.invert(**_lowerCamelCase ).images
A_ : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
A_ : Optional[int] = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
A_ : Tuple = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1e-3 )
@require_torch_gpu
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase_ ( cls ) -> Dict:
A_ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
A_ : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
A_ : Tuple = raw_image
def UpperCAmelCase_ ( self ) -> List[Any]:
A_ : Tuple = torch.manual_seed(0 )
A_ : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
A_ : Optional[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
A_ : Any = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : str = """a bowl of fruit"""
A_ : Optional[int] = """a bowl of pears"""
A_ : str = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
A_ : Tuple = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase ).latents
A_ : Optional[int] = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
A_ : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Optional[int] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
A_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Optional[Any] = """a bowl of fruit"""
A_ : Dict = """a bowl of pears"""
A_ : List[str] = pipe.generate_mask(
image=self.raw_image , source_prompt=_lowerCamelCase , target_prompt=_lowerCamelCase , generator=_lowerCamelCase , )
A_ : Any = pipe.invert(
prompt=_lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=_lowerCamelCase , num_inference_steps=25 , ).latents
A_ : int = pipe(
prompt=_lowerCamelCase , mask_image=_lowerCamelCase , image_latents=_lowerCamelCase , generator=_lowerCamelCase , negative_prompt=_lowerCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
A_ : Any = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 344 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
    model_type = '''vision-encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs ) -> str:
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"A configuration of type {self.model_type} cannot be instantiated because "
                F"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def UpperCAmelCase_ ( cls , encoder_config , decoder_config , **_lowerCamelCase ) -> PretrainedConfig:
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Any:
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
lowerCamelCase = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase_ ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
A_ : Optional[Any] = OrderedDict()
A_ : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : str = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
        batch , encoder_sequence = dummy_input["""input_ids"""].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["""input_ids"""] = dummy_input.pop("""input_ids""" )
        common_inputs["""attention_mask"""] = dummy_input.pop("""attention_mask""" )
        common_inputs["""encoder_hidden_states"""] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class _lowerCAmelCase ( OnnxConfig ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self ) -> None:
pass
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = "default" ) -> OnnxConfig:
A_ : List[Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
| 344 | 1 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__( self : Optional[Any] )-> None:
        """simple docstring"""
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self : Dict )-> int:
"""simple docstring"""
return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
    def tuple( self : Any )-> int:
"""simple docstring"""
return self.major, self.minor, self.patch
    def _validate_operand( self : Optional[Any] , lowerCAmelCase : Dict )-> Union[str, Any]:
        """simple docstring"""
        if isinstance(lowerCAmelCase , str ):
            return Version(lowerCAmelCase )
        elif isinstance(lowerCAmelCase , Version ):
            return lowerCAmelCase
        raise TypeError(F"""{lowerCAmelCase} (type {type(lowerCAmelCase )}) cannot be compared to version.""" )
def __eq__( self : Optional[Any] , lowerCAmelCase : Union[str, Any] )-> List[str]:
"""simple docstring"""
try:
            other = self._validate_operand(lowerCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
        other = self._validate_operand(lowerCAmelCase )
return self.tuple < other.tuple
def __hash__( self : List[Any] )-> List[str]:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def a__( cls : Optional[Any] , lowerCAmelCase : int )-> Dict:
"""simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def a__( self : int )-> str:
"""simple docstring"""
return self.version_str
def _str_to_version_tuple ( A : List[Any] ):
    '''simple docstring'''
    res = _VERSION_REG.match(A )
    if not res:
        raise ValueError(f"""Invalid version '{A}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def _version_tuple_to_str ( A : int ):
    '''simple docstring'''
    return ".".join(str(v ) for v in A )
| 91 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase__( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
    def text_embedder_hidden_size( self : Optional[Any] )-> Dict:
"""simple docstring"""
return 32
@property
    def time_input_dim( self : Dict )-> Dict:
"""simple docstring"""
return 32
@property
    def time_embed_dim( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim( self : List[str] )-> str:
"""simple docstring"""
return 8
@property
    def dummy_tokenizer( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
    def dummy_text_encoder( self : Tuple )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase )
@property
    def dummy_prior( self : str )-> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
UpperCAmelCase = PriorTransformer(**lowerCAmelCase )
return model
@property
    def dummy_renderer( self : List[Any] )-> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase = ShapERenderer(**lowerCAmelCase )
return model
    def get_dummy_components( self : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.dummy_prior
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_renderer
UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=lowerCAmelCase , clip_sample=lowerCAmelCase , clip_sample_range=1.0 , )
UpperCAmelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any]=0 )-> Optional[Any]:
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = '''cpu'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase )
UpperCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
UpperCAmelCase = output.images[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = torch_device == '''cpu'''
UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase , relax_max_difference=lowerCAmelCase , )
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase )
UpperCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = 1
UpperCAmelCase = 2
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase = batch_size * [inputs[key]]
UpperCAmelCase = pipe(**lowerCAmelCase , num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
UpperCAmelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
UpperCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = pipe(
'''a shark''' , generator=lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
| 91 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (number : int ):
    """simple docstring"""
    # Returns True when ``number`` is automorphic, i.e. its square ends in its own digits.
    if not isinstance(number , int ):
        _a : List[str] = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(_a )
    if number < 0:
        return False
    number_square : int = number * number
    while number > 0:
        if number % 1_0 != number_square % 1_0:
return False
number //= 1_0
number_square //= 1_0
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
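# Editor illustration: the digit-by-digit loop above tests for *automorphic* numbers,
# whose squares end in the number itself:
#     >>> [n for n in range(1_000) if UpperCAmelCase_(n)]
#     [0, 1, 5, 6, 25, 76, 376, 625]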
| 271 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase : List[str] = logging.get_logger(__name__)
@dataclass
class A__ ( BenchmarkArguments ):
"""simple docstring"""
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__( self , **kwargs) -> Tuple:
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg))
                logger.warning(
                    F'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
                    F' {positive_arg}={kwargs[positive_arg]}')
        self.torchscript = kwargs.pop('torchscript' , self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics)
        self.fpaa_opt_level = kwargs.pop('fp16_opt_level' , self.fpaa_opt_level)
        super().__init__(**kwargs)
    torchscript : bool = field(default=False , metadata={'''help''': '''Trace the models using torchscript'''} )
    torch_xla_tpu_print_metrics : bool = field(default=False , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''} )
    fpaa_opt_level : str = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
def __lowercase ( self) -> Tuple["torch.device", int]:
'''simple docstring'''
requires_backends(self , ['torch'])
logger.info('PyTorch: setting up devices')
if not self.cuda:
a__ : List[str] = torch.device('cpu')
a__ : Optional[Any] = 0
elif is_torch_tpu_available():
a__ : List[str] = xm.xla_device()
a__ : Union[str, Any] = 0
else:
a__ : List[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
a__ : Optional[int] = torch.cuda.device_count()
return device, n_gpu
@property
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def __lowercase ( self) -> int:
'''simple docstring'''
requires_backends(self , ['torch'])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __lowercase ( self) -> "torch.device":
'''simple docstring'''
requires_backends(self , ['torch'])
return self._setup_devices[0]
@property
def __lowercase ( self) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'])
return self._setup_devices[1]
@property
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return self.n_gpu > 0
| 99 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0
SMALL_TEST = 5_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int) -> None:
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int) -> None:
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str) -> None:
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str) -> None:
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating() -> None:
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
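`get_duration` is imported from a local `utils` module that is not shown here. A plausible minimal implementation (an assumption, not the benchmark repo's actual code) is a decorator that returns the wall-clock time of the call instead of its result:

import functools
import timeit


def get_duration(func):
    # Wraps `func` so that calling it returns the elapsed wall-clock seconds,
    # which benchmark_iterating() then stores in its `times` dict.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start

    return wrapper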
| 11 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched dataset into feature matrix and target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
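`doctest.testmod` above only exercises examples embedded in docstrings, and none of the functions in this file define any. A sketch (illustrative only, with a hypothetical helper name) of what such a doctest could look like for the data-splitting step:

def data_handling_with_doctest(data: dict) -> tuple:
    """
    >>> data_handling_with_doctest({"data": [[1.0, 2.0]], "target": [3.0]})
    ([[1.0, 2.0]], [3.0])
    """
    return (data["data"], data["target"])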
| 11 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration class to store the configuration of an Open-Llama model."""

    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Note: the kwarg string "use_memorry_efficient_attention" keeps its
        # historical spelling for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
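A short sketch of how the validation above behaves (the values are illustrative):

# Valid: both required fields present, factor is a float > 1
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# Invalid: an integer factor fails the isinstance(..., float) check above
try:
    OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2})
except ValueError as err:
    print(err)  # "`rope_scaling`'s factor field must be a float > 1, got 2"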
| 102 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect all dataset/metric/module caches into a temporary directory for the test session
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence the SQLAlchemy 2.0 deprecation warning for tests that request this fixture
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
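The `torchaudio_latest` marker registered in `pytest_configure` would be consumed by tests like the following sketch (a hypothetical test, not part of this conftest):

@pytest.mark.torchaudio_latest
def test_decode_with_latest_torchaudio():
    # Collected normally; pytest_collection_modifyitems still adds the implicit
    # "unit" marker because neither "integration" nor "unit" is set on it.
    assert True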
| 102 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
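A brief sketch of what the `_LazyModule` indirection buys: importing the package is cheap, and the heavy torch-backed module is only materialized on first attribute access (names as declared in `_import_structure` above):

# Nothing below triggers a torch import yet:
import transformers.models.megatron_bert as megatron_bert

# First attribute access resolves "MegatronBertModel" through _import_structure,
# importing .modeling_megatron_bert (and torch) on demand:
model_cls = megatron_bert.MegatronBertModel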
| 193 | 0 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the left half and right half individually, then merge them back into input_list."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
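A quick check of the bottom-up merge sort above (illustrative):

assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
assert iter_merge_sort([1]) == [1]
assert iter_merge_sort([]) == []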
| 273 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 273 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
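A small sketch of what instantiating the shim triggers (illustrative):

import warnings as _warnings

with _warnings.catch_warnings(record=True) as caught:
    _warnings.simplefilter("always")
    extractor = YolosFeatureExtractor()  # emits the FutureWarning above
assert any(issubclass(w.category, FutureWarning) for w in caught)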
| 371 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
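The `inputs_to_logits_ratio` property is just the product of the convolutional strides. A worked check (not part of the original file) with the default `conv_stride`:

config = SEWConfig()  # default conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# 5 * 2**6 = 320 input samples per output frame
assert config.inputs_to_logits_ratio == 320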
| 142 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector class built on top of a Python list of components."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (0-indexed)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A matrix class with elementwise operations and matrix-vector multiplication."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
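A short usage sketch of the classes above (illustrative):

x = Vector([1, 2, 3])
y = Vector([3, 2, 1])
assert str(x + y) == "(4,4,4)"
assert x * y == 10  # dot product: 1*3 + 2*2 + 3*1

m = Matrix([[1, 0], [0, 1]], 2, 2)
assert m.determinant() == 1
assert str(m * Vector([1, 2])) == "(1,2)"  # matrix-vector product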
| 113 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 113 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
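An invocation sketch with placeholder paths (all four paths and the base model name are hypothetical):

# Equivalent to the CLI below, with made-up paths:
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted
convert_s3prl_checkpoint(
    base_model_name="facebook/wav2vec2-base",
    config_path="./config.json",
    checkpoint_path="./s3prl_checkpoint.ckpt",
    model_dump_path="./converted",
)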
| 368 | """simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 321 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A : Optional[int] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 138 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__A : int = imread(R'''digital_image_processing/image_data/lena_small.jpg''')
__A : Tuple = cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE__ ( ) -> str:
'''simple docstring'''
lowerCAmelCase : List[Any] = cn.convert_to_negative(_UpperCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCAmelCase, 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : List[Any] = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Any = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase : Dict = canny.canny(_UpperCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
assert gg.gaussian_filter(_UpperCAmelCase, 5, sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
lowerCAmelCase : List[Any] = conv.img_convolve(_UpperCAmelCase, _UpperCAmelCase ).astype(_UpperCAmelCase )
assert res.any()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
assert med.median_filter(_UpperCAmelCase, 3 ).any()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : Any = sob.sobel_filter(_UpperCAmelCase )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase : List[Any] = sp.make_sepia(_UpperCAmelCase, 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = bs.Burkes(imread(_UpperCAmelCase, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "digital_image_processing/image_data/lena_small.jpg", ) -> str:
'''simple docstring'''
lowerCAmelCase : int = rs.NearestNeighbour(imread(_UpperCAmelCase, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Dict = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase : Dict = imread(_UpperCAmelCase, 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase : Any = 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = image[x_coordinate][y_coordinate]
lowerCAmelCase : List[Any] = lbp.get_neighbors_pixel(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
lowerCAmelCase : Tuple = lbp.local_binary_value(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
assert lbp_image.any()
| 138 | 1 |
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
    >>> references = [0, 1, 2, 0, 1, 2]
    >>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
    '''simple docstring'''

    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        '''simple docstring'''
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score}
| 14 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
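
# Minimal usage sketch (assumes the default constructor works without
# arguments): the deprecated alias behaves like the new processor but emits a
# FutureWarning on construction.
if __name__ == "__main__":
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        PoolFormerFeatureExtractor()
    assert any(issubclass(w.category, FutureWarning) for w in caught)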
| 14 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__( self, feature_size=80, sampling_rate=16_000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs, ) -> None:
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__( self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None, return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None, do_normalize=None, **kwargs, ) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
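
# Illustrative usage sketch (kept as comments because the relative imports above
# prevent running this module standalone; names follow the class as restored):
# one second of silence at 16 kHz is padded to 30 s and mapped to a log-mel array.
#
#     fe = WhisperFeatureExtractor()
#     silence = np.zeros(16_000, dtype=np.float32)
#     feats = fe(silence, sampling_rate=16_000, return_tensors="np")
#     print(feats["input_features"].shape)  # (1, 80, 3000)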
| 95 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """simple docstring"""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
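# Worked example: a 90 degree arc of a circle with radius 10 has length
# 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.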
| 95 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
'image_embeds',
'negative_image_embeds',
'image',
]
    required_optional_params = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32

    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32

    @property
    def block_out_channels_0(self):
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 100

    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
'num_train_timesteps': 10_00,
'beta_schedule': 'linear',
'beta_start': 0.0_0_0_8_5,
'beta_end': 0.0_1_2,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs)
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((256, 256))
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
    def test_kandinsky_img2img(self):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.float16 )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''', torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''', ).to_tuple()
        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
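
# Note on the `strength` argument used above (a general img2img convention, not
# something asserted by this file): with strength=0.2 only roughly the last 20%
# of the diffusion schedule is run on top of the noised `init_image`, so the
# output stays close to the input photo while adopting the prompt's style.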
| 351 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'nat'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
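
# Quick illustration (derived from the defaults above, not extra API):
# NatConfig() yields num_layers == len(depths) == 4,
# hidden_size == 64 * 2 ** 3 == 512 (channels after the last stage), and
# stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4'].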
| 179 | 0 |
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False) -> None:
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, """total_steps""" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
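
# Minimal construction sketch (plain PyTorch objects; assumes only the class
# above). `wrapped.step()` would normally be called by the training loop once
# per optimizer step; kept as comments since stepping requires a live
# accelerate process state.
#
#     import torch
#     model = torch.nn.Linear(4, 2)
#     opt = torch.optim.SGD(model.parameters(), lr=0.1)
#     sched = torch.optim.lr_scheduler.StepLR(opt, step_size=10)
#     wrapped = AcceleratedScheduler(sched, opt, step_with_optimizer=True)
#     wrapped.load_state_dict(wrapped.state_dict())  # passthrough to `sched`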
| 154 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """simple docstring"""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
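
# Tiny demonstration of the intended behaviour (illustrative values):
# padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]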
def is_punctuation(char):
    """simple docstring"""
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('''P'''):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    '''simple docstring'''

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """simple docstring"""
        import torch

        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='''pt''' if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch['''entity_ids''']).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 211 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization" )
        save_git_info(self.hparams.output_dir )
        self.metrics_save_path = Path(self.output_dir ) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir ) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path )
        self.step_count = 0
        self.metrics = defaultdict(list )
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
        assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer ):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch" ) else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor] ):
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir ) / "text_batch.json" )
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir ) / "tok_batch.json" )
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs ):
        return self.model(input_ids, **kwargs )

    def ids_to_clean_text(self, generated_ids: List[int] ):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True )
        return lmap(str.strip, gen_text )

    def _step(self, batch: dict ):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration ):
            decoder_input_ids = self.model._shift_right(tgt_ids )
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch )
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False )
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id )
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1] ), tgt_ids.view(-1 ) )
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1 )
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id )
        return (loss,)

    @property
    def pad(self ) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx ) -> Dict:
        loss_tensors = self._step(batch )
        logs = dict(zip(self.loss_names, loss_tensors ) )
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad ).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx ) -> Dict:
        return self._generative_step(batch )

    def validation_epoch_end(self, outputs, prefix="val" ) -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val ).type_as(loss )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(generative_metrics )
        all_metrics = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics )  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            F"""{prefix}_loss""": loss,
            F"""{prefix}_{self.val_metric}""": metric_tensor,
        }

    def calc_generative_metrics(self, preds, target ) -> Dict:
        return calculate_rouge(preds, target )

    def _generative_step(self, batch: dict ) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids )
        target: List[str] = self.ids_to_clean_text(batch["labels"] )
        loss_tensors = self._step(batch )
        base_metrics = dict(zip(self.loss_names, loss_tensors ) )
        rouge: Dict = self.calc_generative_metrics(preds, target )
        summ_len = np.mean(lmap(len, preds ) )
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge )
        return base_metrics

    def test_step(self, batch, batch_idx ):
        return self._generative_step(batch )

    def test_epoch_end(self, outputs ):
        return self.validation_epoch_end(outputs, prefix="test" )

    def get_dataset(self, type_path ) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False ) -> DataLoader:
        dataset = self.get_dataset(type_path )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, )

    def train_dataloader(self ) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True )
        return dataloader

    def val_dataloader(self ) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size )

    def test_dataloader(self ) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size )
    @staticmethod
    def add_model_specific_args(parser, root_dir ):
        BaseTransformer.add_model_specific_args(parser, root_dir )
        add_generic_args(parser, root_dir )
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument("--freeze_encoder", action="store_true" )
        parser.add_argument("--freeze_embeds", action="store_true" )
        parser.add_argument("--sortish_sampler", action="store_true", default=False )
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False )
        parser.add_argument("--max_tokens_per_batch", type=int, default=None )
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default" )
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all." )
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all." )
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all." )
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all." )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False )
        parser.add_argument("--src_lang", type=str, default="", required=False )
        parser.add_argument("--tgt_lang", type=str, default="", required=False )
        parser.add_argument("--eval_beams", type=int, default=None, required=False )
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None] )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens" )
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save" )
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ), )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs ):
        super().__init__(hparams, **kwargs )
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target ) -> dict:
        return calculate_bleu(preds, target )
def main(args, model=None ) -> SummarizationModule:
    Path(args.output_dir ).mkdir(exist_ok=True )
    check_output_dir(args, expected_items=3 )
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args )
        else:
            model: SummarizationModule = TranslationModule(args )
    dataset = Path(args.data_dir ).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir ).startswith("/tmp" )
        or str(args.output_dir ).startswith("/var" )
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get("WANDB_PROJECT", dataset )
        logger = WandbLogger(name=model.output_dir.name, project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name, project=F"""hf_{dataset}""" )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better ), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl" )
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt" ), recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams )
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
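
# Illustrative CLI invocation (paths and hyperparameters are placeholders, and
# the script name is assumed from context):
#
#   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
#       --model_name_or_path t5-small --gpus 1 --do_predict \
#       --task summarization --n_val 500 --val_metric rouge2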
| 358 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(torch_device )
return image
@property
    def dummy_cond_unet(self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
return model
@property
    def dummy_vae(self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
return model
@property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5006, )
        return RobertaSeriesModelWithTransformation(config )
@property
    def dummy_extractor(self ):
        def extract(*args, **kwargs ):
            class Out:
                def __init__(self ):
                    self.pixel_values = torch.ones([0] )

                def to(self, device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device )
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = alt_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_fp16(self ):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device )
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0 )
        image = alt_pipe(
            [prompt], generator=generator, num_inference_steps=2, output_type="np", image=init_image, ).images
        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504) )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self ):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1e-2
| 209 | 0 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    '''simple docstring'''

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)
    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
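

# A minimal sketch (an assumption about the imported module's behaviour) of the
# classic 0/1 knapsack recursion these tests exercise:
def knapsack_sketch(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    # Skip the current item when it does not fit.
    if weights[counter - 1] > capacity:
        return knapsack_sketch(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the current item.
    return max(
        values[counter - 1] + knapsack_sketch(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack_sketch(capacity, weights, values, counter - 1),
    )
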
if __name__ == "__main__":
    unittest.main()
| 160 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output, [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output, [
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )
        self.run_torchaudio(audio_classifier)
@require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio)
        self.assertEqual(
            output, [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )
@require_torch
    def test_small_model_pt(self):
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''', model=model)
        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {'''score''': 0.0842, '''label''': '''no'''},
            {'''score''': 0.0838, '''label''': '''up'''},
            {'''score''': 0.0837, '''label''': '''go'''},
            {'''score''': 0.0834, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0845, '''label''': '''stop'''},
            {'''score''': 0.0844, '''label''': '''on'''},
            {'''score''': 0.0841, '''label''': '''right'''},
            {'''score''': 0.0834, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'''array''': np.ones((8000,)), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
    def test_large_model_pt(self):
        import datasets

        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''', model=model)
        dataset = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''')
        audio = np.array(dataset[3]['''speech'''], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3), [
                {'''score''': 0.981, '''label''': '''go'''},
                {'''score''': 0.007, '''label''': '''up'''},
                {'''score''': 0.006, '''label''': '''_unknown_'''},
                {'''score''': 0.001, '''label''': '''down'''},
            ], )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
    def test_small_model_tf(self):
        pass
| 160 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__( self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs, ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop( self, image, size, data_format=None, **kwargs, ):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale( self, image, scale, data_format=None, **kwargs, ):
        '''simple docstring'''
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize( self, image, mean, std, data_format=None, **kwargs, ):
        '''simple docstring'''
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ :Optional[int] = size if size is not None else self.size
lowerCAmelCase__ :Optional[int] = get_size_dict(__UpperCAmelCase , param_name='size' , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = resample if resample is not None else self.resample
lowerCAmelCase__ :str = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ :List[str] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ :int = get_size_dict(__UpperCAmelCase , param_name='crop_size' , default_to_square=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ :Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ :Dict = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ :List[str] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ :Union[str, Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ :Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ :Union[str, Any] = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ :Optional[int] = [convert_to_rgb(__UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ :List[str] = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
lowerCAmelCase__ :Tuple = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ :Optional[int] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ :int = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ :Dict = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
lowerCAmelCase__ :Dict = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
lowerCAmelCase__ :List[Any] = {'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 254 |
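The resize step above delegates to get_resize_output_image_size with a "shortest_edge" target. A small sketch of that rule for intuition; this is an assumption about the helper's behaviour, not the library source:

# Hypothetical re-derivation of the shortest-edge resize used by the processor above.
def shortest_edge_size(height: int, width: int, shortest_edge: int = 224) -> tuple[int, int]:
    scale = shortest_edge / min(height, width)      # scale so the short side hits the target
    return round(height * scale), round(width * scale)

print(shortest_edge_size(480, 640))  # (224, 299): aspect ratio preserved, short side at 224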
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Tuple = """facebook/bart-large-mnli"""
__magic_name__ :Any = (
"""This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """
"""should be the text to classify, and `labels`, which should be the list of labels to use for classification. """
"""It returns the most likely label in the list of provided `labels` for the input text."""
)
__magic_name__ :Optional[int] = """text_classifier"""
__magic_name__ :List[Any] = AutoTokenizer
__magic_name__ :str = AutoModelForSequenceClassification
__magic_name__ :int = ["""text""", ["""text"""]]
__magic_name__ :int = ["""text"""]
def snake_case ( self ):
'''simple docstring'''
super().setup()
lowerCAmelCase__ :Any = self.model.config
lowerCAmelCase__ :Any = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
lowerCAmelCase__ :Optional[Any] = int(__UpperCAmelCase )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Dict = labels
return self.pre_processor(
[text] * len(__UpperCAmelCase ) , [F"This example is {label}" for label in labels] , return_tensors='pt' , padding='max_length' , )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = outputs.logits
lowerCAmelCase__ :int = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 254 | 1 |
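The tool above classifies text by recasting each candidate label as an NLI hypothesis and picking the pair with the highest entailment score. A compact sketch of the same trick; it assumes facebook/bart-large-mnli's label order (entailment at index 2), which is what the logits[:, 2] indexing above relies on:

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
nli = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

text, labels = "I loved this movie", ["positive", "negative"]
enc = tok([text] * len(labels), [f"This example is {l}" for l in labels],
          return_tensors="pt", padding=True)
with torch.no_grad():
    logits = nli(**enc).logits        # one (contradiction, neutral, entailment) triple per pair
best = logits[:, 2].argmax().item()   # highest entailment wins
print(labels[best])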
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase__ ( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> tuple[float, list[float]]:
"""simple docstring"""
snake_case = list(range(len(_UpperCamelCase ) ) )
snake_case = [v / w for v, w in zip(_UpperCamelCase , _UpperCamelCase )]
index.sort(key=lambda _UpperCamelCase : ratio[i] , reverse=_UpperCamelCase )
snake_case = 0
snake_case = [0] * len(_UpperCamelCase )
for i in index:
if weight[i] <= capacity:
snake_case = 1
max_value += value[i]
capacity -= weight[i]
else:
snake_case = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
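A worked trace of the greedy fractional-knapsack routine above with toy numbers; since the snippet's identifiers are mangled, the steps are inlined rather than calling the function by name:

value, weight, capacity = [2, 6, 6], [1, 2, 4], 5
order = sorted(range(3), key=lambda i: value[i] / weight[i], reverse=True)  # ratios 2.0, 3.0, 1.5 -> [1, 0, 2]
total, fractions = 0.0, [0.0] * 3
for i in order:
    if weight[i] <= capacity:
        fractions[i], total, capacity = 1.0, total + value[i], capacity - weight[i]
    else:
        fractions[i] = capacity / weight[i]   # take only the fraction that still fits
        total += value[i] * fractions[i]
        break
print(total, fractions)  # 11.0 [1.0, 1.0, 0.5]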
| 150 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE__ = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 | 1 |
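The module above defers heavy imports through _LazyModule so importing the package stays cheap until a symbol is actually used. A minimal sketch of the same idea with a plain PEP 562 module __getattr__; this illustrates the pattern, it is not the transformers implementation:

# mypkg/__init__.py -- hypothetical package that resolves attributes lazily
import importlib

_import_structure = {"modeling": ["BigModel"], "config": ["BigConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)  # imported on first access
    return getattr(module, name)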
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class UpperCAmelCase :
def __init__( self :List[str] , lowercase_ :Dict , lowercase_ :Dict=13 , lowercase_ :Tuple=7 , lowercase_ :Union[str, Any]=True , lowercase_ :List[Any]=True , lowercase_ :Optional[int]=True , lowercase_ :List[str]=99 , lowercase_ :Union[str, Any]=32 , lowercase_ :List[str]=5 , lowercase_ :Tuple=4 , lowercase_ :List[Any]=37 , lowercase_ :str="gelu" , lowercase_ :Dict=0.1 , lowercase_ :str=0.1 , lowercase_ :Tuple=5_12 , lowercase_ :Tuple=16 , lowercase_ :Optional[Any]=2 , lowercase_ :str=0.0_2 , lowercase_ :Dict=3 , lowercase_ :str=4 , lowercase_ :Dict=None , )-> Dict:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = self.vocab_size - 1
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase_ ( self :str , lowercase_ :Optional[Any] , lowercase_ :Dict , lowercase_ :Union[str, Any] , lowercase_ :Tuple , *lowercase_ :Tuple )-> Optional[int]:
A__ = OpenAIGPTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
A__ = model(lowercase_ , token_type_ids=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :Tuple , lowercase_ :List[Any] , lowercase_ :str , lowercase_ :List[str] , lowercase_ :List[str] , *lowercase_ :Optional[int] )-> Tuple:
A__ = OpenAIGPTLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Optional[Any] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :List[str] , *lowercase_ :Any )-> str:
A__ = OpenAIGPTDoubleHeadsModel(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Tuple , lowercase_ :Any , lowercase_ :Optional[int] , lowercase_ :Any , *lowercase_ :str )-> Union[str, Any]:
A__ = self.num_labels
A__ = OpenAIGPTForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self :Dict )-> Optional[int]:
A__ = self.prepare_config_and_inputs()
(
(
A__
), (
A__
), (
A__
), (
A__
), (
A__
), (
A__
), (
A__
),
) = config_and_inputs
A__ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__lowercase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__lowercase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__lowercase = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :Tuple , lowercase_ :Tuple , lowercase_ :Dict , lowercase_ :Tuple )-> Tuple:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Dict , lowercase_ :str=False )-> Optional[int]:
A__ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , )
A__ = inputs_dict["labels"]
A__ = inputs_dict["labels"]
A__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , )
A__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def UpperCAmelCase_ ( self :str )-> int:
A__ = OpenAIGPTModelTester(self )
A__ = ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def UpperCAmelCase_ ( self :int )-> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :Tuple )-> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> Tuple:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowercase_ )
def UpperCAmelCase_ ( self :str )-> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ )
@slow
def UpperCAmelCase_ ( self :Dict )-> Any:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = OpenAIGPTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self :Union[str, Any] )-> List[str]:
A__ = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt" )
model.to(lowercase_ )
A__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=lowercase_ ) # the president is
A__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A__ = model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
| 123 |
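The slow test above pins greedy decoding down token by token, which only works because do_sample=False makes generation deterministic. The same check in isolation, assuming the openai-gpt checkpoint is reachable:

import torch
from transformers import OpenAIGPTLMHeadModel

model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt").eval()
prompt = torch.tensor([[481, 4735, 544]])            # "the president is", per the test
with torch.no_grad():
    out = model.generate(prompt, do_sample=False)    # greedy decoding, hence reproducible
print(out[0].tolist())                               # should reproduce the expected id list above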
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__lowerCAmelCase : Tuple =trt.Logger(trt.Logger.WARNING)
__lowerCAmelCase : Optional[Any] =absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__lowerCAmelCase : List[Any] =logging.getLogger(__name__)
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
    help="Path to ONNX model.",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="Number of processes to use for preprocessing."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
__lowerCAmelCase : Tuple =parser.parse_args()
if args.tokenizer_name:
__lowerCAmelCase : int =AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
__lowerCAmelCase : Union[str, Any] =args.per_device_eval_batch_size
__lowerCAmelCase : List[Any] =(args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__lowerCAmelCase : Tuple =True
__lowerCAmelCase : int ="temp_engine/bert-fp32.engine"
if args.fpaa:
__lowerCAmelCase : Tuple ="temp_engine/bert-fp16.engine"
if args.inta:
__lowerCAmelCase : Optional[int] ="temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
__lowerCAmelCase : Tuple =1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__lowerCAmelCase : Optional[Any] =[network.get_input(i) for i in range(network.num_inputs)]
__lowerCAmelCase : Any =[_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__lowerCAmelCase : Optional[Any] =1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__lowerCAmelCase : int =builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__lowerCAmelCase : Dict =builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def UpperCamelCase ( _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict ):
A__ = np.asarray(inputs["input_ids"] , dtype=np.intaa )
A__ = np.asarray(inputs["attention_mask"] , dtype=np.intaa )
A__ = np.asarray(inputs["token_type_ids"] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCamelCase )
# start time
A__ = time.time()
# Run inference
context.execute_async(
bindings=[int(_lowerCamelCase ) for d_inp in d_inputs] + [int(_lowerCamelCase ), int(_lowerCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
A__ = time.time()
A__ = end_time - start_time
A__ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__lowerCAmelCase : str =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__lowerCAmelCase : List[Any] =load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__lowerCAmelCase : Optional[Any] =raw_datasets["validation"].column_names
__lowerCAmelCase : Optional[Any] ="question" if "question" in column_names else column_names[0]
__lowerCAmelCase : str ="context" if "context" in column_names else column_names[1]
__lowerCAmelCase : Optional[Any] ="answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__lowerCAmelCase : Any =tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__lowerCAmelCase : Any =min(args.max_seq_length, tokenizer.model_max_length)
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
A__ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A__ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=_lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , padding="max_length" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A__ = tokenized_examples.pop("overflow_to_sample_mapping" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A__ = []
for i in range(len(tokenized_examples["input_ids"] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A__ = tokenized_examples.sequence_ids(_lowerCamelCase )
A__ = 1 if pad_on_right else 0
        # One example can give several spans; this is the index of the example containing this span of text.
A__ = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index] )
        # Set the offset_mapping entries that are not part of the context to None, so it's easy to determine
        # whether a token position is part of the context or not.
A__ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
]
return tokenized_examples
__lowerCAmelCase : str =raw_datasets["validation"]
# Validation Feature Creation
__lowerCAmelCase : Union[str, Any] =eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
__lowerCAmelCase : List[Any] =default_data_collator
__lowerCAmelCase : List[Any] =eval_dataset.remove_columns(["example_id", "offset_mapping"])
__lowerCAmelCase : List[str] =DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
A__ = postprocess_qa_predictions(
examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A__ = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
A__ = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
A__ = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase )
__lowerCAmelCase : Tuple =load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] ):
return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
__lowerCAmelCase : Any =[cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__lowerCAmelCase : List[Any] =cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__lowerCAmelCase : List[str] =cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__lowerCAmelCase : List[str] =cuda.mem_alloc(h_outputa.nbytes)
__lowerCAmelCase : int =cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__lowerCAmelCase : Optional[Any] =cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__lowerCAmelCase : str =0.0
__lowerCAmelCase : Tuple =0
__lowerCAmelCase : List[str] =timeit.default_timer()
__lowerCAmelCase : Union[str, Any] =None
for step, batch in enumerate(eval_dataloader):
__lowerCAmelCase , __lowerCAmelCase : Dict =model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__lowerCAmelCase , __lowerCAmelCase : List[Any] =outputs
__lowerCAmelCase : Tuple =torch.tensor(start_logits)
__lowerCAmelCase : Tuple =torch.tensor(end_logits)
    # predictions and labels must be padded so they can be gathered across processes
__lowerCAmelCase : Tuple =accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__lowerCAmelCase : int =(accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__lowerCAmelCase : List[Any] =logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__lowerCAmelCase : Dict =nested_truncate(all_preds, len(eval_dataset))
__lowerCAmelCase : Optional[int] =timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
__lowerCAmelCase : Optional[Any] =post_processing_function(eval_examples, eval_dataset, all_preds)
__lowerCAmelCase : Optional[Any] =metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 123 | 1 |
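Stripped of the TensorRT specifics, the host/device traffic in the script above is the standard pycuda async pattern: pinned host buffers, device allocations, copies enqueued on one stream, then a single synchronize. A skeleton with a toy buffer in place of the real bindings:

import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda

h_buf = cuda.pagelocked_empty((8, 384), dtype=np.float32)  # pinned host memory enables async copies
d_buf = cuda.mem_alloc(h_buf.nbytes)                       # raw device allocation
stream = cuda.Stream()

cuda.memcpy_htod_async(d_buf, h_buf, stream)  # enqueue host -> device
# ... enqueue kernels / context.execute_async(...) on the same stream ...
cuda.memcpy_dtoh_async(h_buf, d_buf, stream)  # enqueue device -> host
stream.synchronize()                          # copies are only guaranteed complete here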
"""simple docstring"""
def _A (__a = 1_00 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
| 91 |
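The O(n) loop above has a closed form: sum i = n(n+1)/2 and sum i^2 = n(n+1)(2n+1)/6, so the answer is (n(n+1)/2)^2 - n(n+1)(2n+1)/6. A direct check:

def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2          # (sum of 1..n) squared
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(10) == 2640              # 55**2 - 385
print(solution_closed_form())                        # 25164150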
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
UpperCAmelCase_ : Dict = logging.getLogger(__name__)
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30522, type=int)
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, """rb""") as fp:
UpperCAmelCase_ : Union[str, Any] = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
UpperCAmelCase_ : Any = Counter()
for tk_ids in data:
counter.update(tk_ids)
UpperCAmelCase_ : List[Any] = [0] * args.vocab_size
for k, v in counter.items():
UpperCAmelCase_ : Dict = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 91 | 1 |
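The dumped counts typically feed an MLM masking distribution downstream. One common smoothing, shown here as an assumption about that downstream use rather than anything this script does, raises counts to a power below 1 so rare tokens are not starved:

import numpy as np

counts = np.array([0, 12, 3, 900, 45], dtype=np.float64)  # toy per-id counts; real ones come from the dump above
probs = np.maximum(counts, 1.0) ** 0.7                    # exponent < 1 flattens the distribution
probs /= probs.sum()                                      # masking probability per vocabulary id
print(probs)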
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : int=5 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Any=37 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Any=10 , _UpperCAmelCase : Tuple=0.02 , ):
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = image_size
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = type_sequence_label_size
UpperCAmelCase__ = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ = (image_size // patch_size) ** 2
UpperCAmelCase__ = num_patches + 1
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : int , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = FlaxViTModel(config=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ = (self.image_size, self.image_size)
UpperCAmelCase__ = (self.patch_size, self.patch_size)
UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.type_sequence_label_size
UpperCAmelCase__ = FlaxViTForImageClassification(config=_UpperCAmelCase )
UpperCAmelCase__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ = 1
UpperCAmelCase__ = FlaxViTForImageClassification(_UpperCAmelCase )
UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ = model(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) = config_and_inputs
UpperCAmelCase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = FlaxViTModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(_UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(_UpperCAmelCase : Dict , **_UpperCAmelCase : Union[str, Any] ):
return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase__ = model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase__ = model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase__ = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
UpperCAmelCase__ = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 61 |
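The JIT subtest above is a generic Flax pattern: run the model once compiled and once eagerly, then compare outputs shape for shape. The same pattern on a toy function, independent of any model:

import jax
import jax.numpy as jnp

def f(x):
    return jnp.tanh(x) * 2.0

x = jnp.ones((1, 3))
with jax.disable_jit():
    eager = f(x)              # runs op by op, easier to debug
jitted = jax.jit(f)(x)        # traced and compiled on first call
assert eager.shape == jitted.shape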
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = ShapEPipeline
lowerCAmelCase_ : List[str] = ["""prompt"""]
lowerCAmelCase_ : Union[str, Any] = ["""prompt"""]
lowerCAmelCase_ : str = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
lowerCAmelCase_ : int = False
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
return 8
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCAmelCase__ = PriorTransformer(**_UpperCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase__ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase__ = ShapERenderer(**_UpperCAmelCase )
return model
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = self.dummy_prior
UpperCAmelCase__ = self.dummy_text_encoder
UpperCAmelCase__ = self.dummy_tokenizer
UpperCAmelCase__ = self.dummy_renderer
UpperCAmelCase__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
UpperCAmelCase__ = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any]=0 ):
"""simple docstring"""
if str(_UpperCAmelCase ).startswith("""mps""" ):
UpperCAmelCase__ = torch.manual_seed(_UpperCAmelCase )
else:
UpperCAmelCase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
UpperCAmelCase__ = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = """cpu"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**_UpperCAmelCase )
UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
UpperCAmelCase__ = output.images[0]
UpperCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase__ = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = torch_device == """cpu"""
UpperCAmelCase__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_dummy_components()
UpperCAmelCase__ = self.pipeline_class(**_UpperCAmelCase )
UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase__ = batch_size * [inputs[key]]
UpperCAmelCase__ = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
UpperCAmelCase__ = ShapEPipeline.from_pretrained("""openai/shap-e""" )
UpperCAmelCase__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
UpperCAmelCase__ = pipe(
"""a shark""" , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 61 | 1 |
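Both Shap-E tests seed a torch.Generator so that diffusion sampling is reproducible (with a separate manual_seed path for mps devices). The cpu/cuda pattern in isolation:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
generator = torch.Generator(device=device).manual_seed(0)
sample_a = torch.randn((2, 2), generator=generator, device=device)
generator.manual_seed(0)                                  # reset -> identical draw
sample_b = torch.randn((2, 2), generator=generator, device=device)
assert torch.equal(sample_a, sample_b)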
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCAmelCase__ = 5_00_00
lowerCAmelCase__ = 50_00
lowerCAmelCase__ ,lowerCAmelCase__ = os.path.split(__file__)
lowerCAmelCase__ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def _UpperCAmelCase (UpperCamelCase__ : datasets.Dataset , UpperCamelCase__ : Optional[Any] ):
for i in range(UpperCamelCase__ ):
_A : Optional[int] = dataset[i]
@get_duration
def _UpperCAmelCase (UpperCamelCase__ : datasets.Dataset , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ):
for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ):
_A : int = dataset[i : i + batch_size]
@get_duration
def _UpperCAmelCase (UpperCamelCase__ : datasets.Dataset , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
with dataset.formatted_as(type=UpperCamelCase__ ):
for i in range(UpperCamelCase__ ):
_A : Optional[int] = dataset[i]
@get_duration
def _UpperCAmelCase (UpperCamelCase__ : datasets.Dataset , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ):
with dataset.formatted_as(type=UpperCamelCase__ ):
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
_A : Dict = dataset[i : i + batch_size]
def _UpperCAmelCase ():
_A : Optional[int] = {"num examples": SPEED_TEST_N_EXAMPLES}
_A : List[Any] = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
_A : str = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
_A : List[Any] = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
_A : Dict = generate_example_dataset(
os.path.join(UpperCamelCase__ , "dataset.arrow" ) , UpperCamelCase__ , num_examples=UpperCamelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(UpperCamelCase__ ) )
_A : List[str] = func(UpperCamelCase__ , **UpperCamelCase__ )
print("shuffling dataset" )
_A : Union[str, Any] = dataset.shuffle()
    print("Second set of iterations (after shuffling)" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(UpperCamelCase__ ) )
_A : Dict = func(
UpperCamelCase__ , **UpperCamelCase__ )
with open(UpperCamelCase__ , "wb" ) as f:
f.write(json.dumps(UpperCamelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 11 |
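Each reader above is wrapped by get_duration from a local utils module that is not shown here. A plausible sketch of such a decorator, offered as an assumption about the helper rather than its actual source:

import time
from functools import wraps

def get_duration(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed, which the benchmark records
    return wrapper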
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=0.9 , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , ) -> Any:
snake_case_ = size if size is not None else {"""shortest_edge""": 30}
snake_case_ = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize_and_center_crop
snake_case_ = size
snake_case_ = crop_pct
snake_case_ = crop_size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
def lowerCAmelCase_ ( self ) -> Any:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowerCamelCase ( __snake_case , unittest.TestCase ):
lowerCamelCase_ : List[Any] = PoolFormerImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self ) -> int:
snake_case_ = PoolFormerImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self ) -> Tuple:
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase , """crop_pct""" ) )
self.assertTrue(hasattr(lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase , """image_std""" ) )
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase_ ( self ) -> List[str]:
pass
def lowerCAmelCase_ ( self ) -> Tuple:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case_ = image_processing(lowerCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , ) | 34 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 34 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a__: int = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase=None,__lowerCamelCase=None ):
A__ = self.layer[current_layer](__lowerCamelCase,__lowerCamelCase,head_mask[current_layer] )
A__ = layer_outputs[0]
return hidden_states
@add_start_docstrings(
'''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , UpperCamelCase__ , )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def __init__( self,__lowerCamelCase ):
super().__init__(__lowerCamelCase )
A__ = BertEncoderWithPabee(__lowerCamelCase )
self.init_weights()
A__ = 0
A__ = 0
A__ = 0
A__ = 0
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = threshold
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = patience
def UpperCamelCase ( self ):
A__ = 0
A__ = 0
def UpperCamelCase ( self ):
A__ = self.inference_layers_num / self.inference_instances_num
A__ = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(__lowerCamelCase )
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=False,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
A__ = input_ids.size()
elif inputs_embeds is not None:
A__ = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
A__ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
A__ = torch.ones(__lowerCamelCase,device=__lowerCamelCase )
if token_type_ids is None:
A__ = torch.zeros(__lowerCamelCase,dtype=torch.long,device=__lowerCamelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
A__ = self.get_extended_attention_mask(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
A__ , A__ , A__ = encoder_hidden_states.size()
A__ = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
A__ = torch.ones(__lowerCamelCase,device=__lowerCamelCase )
A__ = self.invert_attention_mask(__lowerCamelCase )
else:
A__ = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
A__ = self.get_head_mask(__lowerCamelCase,self.config.num_hidden_layers )
A__ = self.embeddings(
input_ids=__lowerCamelCase,position_ids=__lowerCamelCase,token_type_ids=__lowerCamelCase,inputs_embeds=__lowerCamelCase )
A__ = embedding_output
if self.training:
A__ = []
for i in range(self.config.num_hidden_layers ):
A__ = self.encoder.adaptive_forward(
__lowerCamelCase,current_layer=__lowerCamelCase,attention_mask=__lowerCamelCase,head_mask=__lowerCamelCase )
A__ = self.pooler(__lowerCamelCase )
A__ = output_layers[i](output_dropout(__lowerCamelCase ) )
res.append(__lowerCamelCase )
elif self.patience == 0: # Use all layers for inference
A__ = self.encoder(
__lowerCamelCase,attention_mask=__lowerCamelCase,head_mask=__lowerCamelCase,encoder_hidden_states=__lowerCamelCase,encoder_attention_mask=__lowerCamelCase,)
A__ = self.pooler(encoder_outputs[0] )
A__ = [output_layers[self.config.num_hidden_layers - 1](__lowerCamelCase )]
else:
A__ = 0
A__ = None
A__ = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
A__ = self.encoder.adaptive_forward(
__lowerCamelCase,current_layer=__lowerCamelCase,attention_mask=__lowerCamelCase,head_mask=__lowerCamelCase )
A__ = self.pooler(__lowerCamelCase )
A__ = output_layers[i](__lowerCamelCase )
if regression:
A__ = logits.detach()
if patient_result is not None:
A__ = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
A__ = 0
else:
A__ = logits.detach().argmax(dim=1 )
if patient_result is not None:
A__ = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(__lowerCamelCase ) ):
patient_counter += 1
else:
A__ = 0
A__ = logits
if patient_counter == self.patience:
break
A__ = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. ''' , UpperCamelCase__ , )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
def __init__( self,__lowerCamelCase ):
super().__init__(__lowerCamelCase )
A__ = config.num_labels
A__ = BertModelWithPabee(__lowerCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.ModuleList(
[nn.Linear(config.hidden_size,self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,__lowerCamelCase=None,):
A__ = self.bert(
input_ids=__lowerCamelCase,attention_mask=__lowerCamelCase,token_type_ids=__lowerCamelCase,position_ids=__lowerCamelCase,head_mask=__lowerCamelCase,inputs_embeds=__lowerCamelCase,output_dropout=self.dropout,output_layers=self.classifiers,regression=self.num_labels == 1,)
A__ = (logits[-1],)
if labels is not None:
A__ = None
A__ = 0
for ix, logits_item in enumerate(__lowerCamelCase ):
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits_item.view(-1 ),labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits_item.view(-1,self.num_labels ),labels.view(-1 ) )
if total_loss is None:
A__ = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
A__ = (total_loss / total_weights,) + outputs
return outputs
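# The inference path above is PABEE's patience-based early exit: every layer has
# its own classifier, and the forward pass stops once `patience` consecutive
# layers repeat the same prediction. The control flow, isolated as a plain-Python
# sketch (`layer_predictions` stands in for the per-layer classifier outputs):
def early_exit(layer_predictions, patience):
    """Stop once `patience` consecutive layers repeat the same prediction."""
    patient_counter = 0
    patient_result = None
    for prediction in layer_predictions:
        if patient_result is not None and prediction == patient_result:
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = prediction
        if patient_counter == patience:
            break
    return patient_result
# early_exit([0, 2, 2, 2, 1], patience=2) -> 2, after reading only 4 of 5 layers.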
| 193 | 0 |
"""simple docstring"""
from math import factorial
def UpperCAmelCase__ (lowerCAmelCase_ = 100 ):
'''simple docstring'''
return sum(map(lowerCAmelCase_ , str(factorial(lowerCAmelCase_ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
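# Worked example (not part of the original file): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) returns 27.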
| 195 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 195 | 1 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =[
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
SCREAMING_SNAKE_CASE_: List[str] =[
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
SCREAMING_SNAKE_CASE_: List[Any] =[
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCREAMING_SNAKE_CASE_: Any =[
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
SCREAMING_SNAKE_CASE_: Tuple =[
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
SCREAMING_SNAKE_CASE_: int =[
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
SCREAMING_SNAKE_CASE_: Optional[Any] =[
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
SCREAMING_SNAKE_CASE_: Optional[Any] =[
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
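# These hand-picked constants read as inference timestep schedules: strictly
# descending subsets of the usual 1000 diffusion training steps, always ending
# at 0 so sampling terminates at the data distribution. A quick structural check
# one could run over any of the lists (illustrative only):
def is_valid_schedule(timesteps):
    """A schedule must be strictly decreasing and finish at timestep 0."""
    return all(a > b for a, b in zip(timesteps, timesteps[1:])) and timesteps[-1] == 0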
| 1 |
def actual_power(a: int, b: int) -> int:
    """Compute a**abs(b) by recursive squaring (divide and conquer)."""
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half
def power(a: int, b: int) -> float:
    """Compute a**b, handling negative exponents via the reciprocal."""
    if b < 0:
        # int(b / 2) truncates toward zero, so actual_power(a, b) with b < 0
        # still returns a**abs(b); taking the reciprocal then yields a**b.
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
    print(power(-2, -3))
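# Worked example (illustrative): power(2, 10) computes actual_power(2, 5) == 32
# once and squares it, giving 1024 in O(log b) recursive calls, and
# power(-2, -3) returns 1 / (-8) == -0.125.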
| 142 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='poolformer'
def __init__(self , a_=3 , a_=16 , a_=16 , a_=3 , a_=4.0 , a_=[2, 2, 6, 2] , a_=[64, 1_28, 3_20, 5_12] , a_=[7, 3, 3, 3] , a_=[4, 2, 2, 2] , a_=[2, 1, 1, 1] , a_=4 , a_=0.0 , a_="gelu" , a_=True , a_=1E-5 , a_=0.02 , **a_ , ):
'''simple docstring'''
__snake_case : Union[str, Any] = num_channels
__snake_case : str = patch_size
__snake_case : int = stride
__snake_case : Optional[int] = padding
__snake_case : Any = pool_size
__snake_case : int = hidden_sizes
__snake_case : Optional[Any] = mlp_ratio
__snake_case : Any = depths
__snake_case : Optional[int] = patch_sizes
__snake_case : Optional[Any] = strides
__snake_case : Optional[int] = num_encoder_blocks
__snake_case : List[str] = drop_path_rate
__snake_case : Dict = hidden_act
__snake_case : Optional[Any] = use_layer_scale
__snake_case : int = layer_scale_init_value
__snake_case : int = initializer_range
super().__init__(**a_ )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 2E-3
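# Under the canonical names from `transformers` (which the sample above
# obfuscates), the configuration is used like any other model config; a minimal
# sketch:
from transformers import PoolFormerConfig
config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
print(config.num_encoder_blocks, config.hidden_sizes[-1])  # 4 512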
| 370 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
        __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
            # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
            # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
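# The tests above exercise the standard `accelerate` workflow; outside of a
# test, the same objects are wired together roughly like this (a minimal sketch
# of the documented API, with a toy model and random data):
import torch
from accelerate import Accelerator
accelerator = Accelerator()
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(torch.randn(8, 2), torch.randn(8, 4)), batch_size=4
)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for inputs, targets in dataloader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)  # replaces loss.backward() so mixed precision / DDP work
    optimizer.step()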
| 24 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = KandinskyInpaintPipeline
lowercase__ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
lowercase__ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
lowercase__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase__ = False
@property
def __lowerCAmelCase ( self : int ):
return 3_2
@property
def __lowerCAmelCase ( self : str ):
return 3_2
@property
def __lowerCAmelCase ( self : str ):
return self.time_input_dim
@property
def __lowerCAmelCase ( self : int ):
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Optional[Any] ):
return 1_0_0
@property
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Optional[int] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Dict ):
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_0_0_5 ,)
lowerCAmelCase__ : Any = MultilingualCLIP(lowercase_ )
lowerCAmelCase__ : Optional[int] = text_encoder.eval()
return text_encoder
@property
def __lowerCAmelCase ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCAmelCase__ : str = UNetaDConditionModel(**lowercase_ )
return model
@property
def __lowerCAmelCase ( self : List[Any] ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self : Any ):
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = self.dummy_text_encoder
lowerCAmelCase__ : List[str] = self.dummy_tokenizer
lowerCAmelCase__ : List[str] = self.dummy_unet
lowerCAmelCase__ : int = self.dummy_movq
lowerCAmelCase__ : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='''linear''' ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=lowercase_ ,set_alpha_to_one=lowercase_ ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=lowercase_ ,)
lowerCAmelCase__ : Optional[int] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Dict ,lowercase_ : int=0 ):
lowerCAmelCase__ : Dict = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(lowercase_ )
# create init_image
lowerCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 )[0]
lowerCAmelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowercase_ ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
lowerCAmelCase__ : Tuple = np.ones((6_4, 6_4) ,dtype=np.floataa )
lowerCAmelCase__ : List[Any] = 0
if str(lowercase_ ).startswith('''mps''' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase__ : Dict = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase__ : List[str] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : List[Any] = '''cpu'''
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = self.pipeline_class(**lowercase_ )
lowerCAmelCase__ : Optional[Any] = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : str = pipe(**self.get_dummy_inputs(lowercase_ ) )
lowerCAmelCase__ : Optional[int] = output.images
lowerCAmelCase__ : str = pipe(
**self.get_dummy_inputs(lowercase_ ) ,return_dict=lowercase_ ,)[0]
lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase__ : List[Any] = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def __lowerCAmelCase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
lowerCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCAmelCase__ : Optional[Any] = np.ones((7_6_8, 7_6_8) ,dtype=np.floataa )
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[str] = '''a hat'''
lowerCAmelCase__ : List[str] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(lowercase_ )
lowerCAmelCase__ : int = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' ,torch_dtype=torch.floataa )
lowerCAmelCase__ : List[Any] = pipeline.to(lowercase_ )
pipeline.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase__ : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = pipe_prior(
lowercase_ ,generator=lowercase_ ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
lowerCAmelCase__ : Union[str, Any] = pipeline(
lowercase_ ,image=lowercase_ ,mask_image=lowercase_ ,image_embeds=lowercase_ ,negative_image_embeds=lowercase_ ,generator=lowercase_ ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,output_type='''np''' ,)
lowerCAmelCase__ : Any = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase_ ,lowercase_ )
| 106 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 321 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class _snake_case ( snake_case , snake_case ):
UpperCamelCase__ = 1
@register_to_config
def __init__( self , _a=2_000 , _a=0.1 , _a=20 , _a=1e-3 ):
__magic_name__ : Dict = None
__magic_name__ : Tuple = None
__magic_name__ : Tuple = None
def SCREAMING_SNAKE_CASE ( self , _a , _a = None ):
__magic_name__ : Tuple = torch.linspace(1 , self.config.sampling_eps , _a , device=_a )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a=None ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
__magic_name__ : Any = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
__magic_name__ : Union[str, Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
__magic_name__ : Optional[Any] = std.flatten()
while len(std.shape ) < len(score.shape ):
__magic_name__ : List[Any] = std.unsqueeze(-1 )
__magic_name__ : List[str] = -score / std
# compute
__magic_name__ : str = -1.0 / len(self.timesteps )
__magic_name__ : List[str] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
__magic_name__ : Tuple = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
__magic_name__ : List[str] = beta_t.unsqueeze(-1 )
__magic_name__ : Dict = -0.5 * beta_t * x
__magic_name__ : Dict = torch.sqrt(_a )
__magic_name__ : List[Any] = drift - diffusion**2 * score
__magic_name__ : List[str] = x + drift * dt
# add noise
__magic_name__ : int = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype )
__magic_name__ : List[str] = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
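# The update above is an Euler-Maruyama discretization of the reverse-time
# variance-preserving SDE from score-based generative modeling (Song et al.).
# With beta(t) the noise schedule and s(x, t) the score estimate:
#     dx = [-1/2 * beta(t) * x - beta(t) * s(x, t)] * dt + sqrt(beta(t)) * sqrt(-dt) * z,  z ~ N(0, I)
# where dt = -1 / num_steps; this is exactly `drift - diffusion**2 * score`
# followed by `x_mean + diffusion * math.sqrt(-dt) * noise` in the code.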
| 365 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _snake_case :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=3 , _a=0.6 , _a=None , ):
__magic_name__ : Tuple = parent
__magic_name__ : Tuple = batch_size
__magic_name__ : int = image_size
__magic_name__ : Optional[Any] = patch_size
__magic_name__ : int = num_channels
__magic_name__ : Dict = is_training
__magic_name__ : Tuple = use_labels
__magic_name__ : List[str] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Optional[Any] = num_attention_heads
__magic_name__ : int = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Optional[Any] = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : List[str] = mask_ratio
__magic_name__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__magic_name__ : Union[str, Any] = (image_size // patch_size) ** 2
__magic_name__ : Optional[int] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Tuple = None
if self.use_labels:
__magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : List[str] = TFViTMAEModel(config=_a )
__magic_name__ : List[Any] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
__magic_name__ : Dict = TFViTMAEForPreTraining(_a )
__magic_name__ : Any = model(_a , training=_a )
# expected sequence length = num_patches
__magic_name__ : Tuple = (self.image_size // self.patch_size) ** 2
__magic_name__ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__magic_name__ : Dict = 1
__magic_name__ : int = TFViTMAEForPreTraining(_a )
__magic_name__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Any = model(_a , training=_a )
__magic_name__ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.prepare_config_and_inputs()
((__magic_name__) , (__magic_name__) , (__magic_name__)) : List[Any] = config_and_inputs
__magic_name__ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _snake_case ( snake_case , snake_case , unittest.TestCase ):
UpperCamelCase__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase__ = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = TFViTMAEModelTester(self )
__magic_name__ : List[str] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ):
pass
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ , __magic_name__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(_a )
__magic_name__ : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : int = [*signature.parameters.keys()]
__magic_name__ : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def SCREAMING_SNAKE_CASE ( self ):
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : str = model_class(_a )
__magic_name__ : List[Any] = self._prepare_for_class(_a , _a )
__magic_name__ : Union[str, Any] = model(_a , noise=_a )
__magic_name__ : int = copy.deepcopy(self._prepare_for_class(_a , _a ) )
__magic_name__ : str = model(**_a , noise=_a )
__magic_name__ : Optional[int] = outputs_dict[0].numpy()
__magic_name__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def SCREAMING_SNAKE_CASE ( self ):
# make the mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_a ):
__magic_name__ : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_a ):
__magic_name__ : List[str] = v.numpy()
else:
__magic_name__ : str = np.array(_a )
return inputs_np_dict
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = model_class(_a )
__magic_name__ : int = self._prepare_for_class(_a , _a )
__magic_name__ : Optional[Any] = prepare_numpy_arrays(_a )
__magic_name__ : Union[str, Any] = model(_a , noise=_a )
__magic_name__ : int = model(**_a , noise=_a )
self.assert_outputs_same(_a , _a )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a ):
# make masks reproducible
np.random.seed(2 )
__magic_name__ : Union[str, Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__magic_name__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ : Dict = tf.constant(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__magic_name__ : List[str] = tf_noise
super().check_pt_tf_models(_a , _a , _a )
def SCREAMING_SNAKE_CASE ( self ):
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Any = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_a )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(_a , _a ),)
if isinstance(_a , _a )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_a , "_keras_serializable" , _a )
}
__magic_name__ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__magic_name__ : Optional[int] = tf.convert_to_tensor(_a )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
__magic_name__ : Optional[int] = main_layer_class(_a )
__magic_name__ : Optional[int] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__magic_name__ : Any = tf.keras.Model(_a , outputs=main_layer(_a ) )
__magic_name__ : str = model(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : Dict = os.path.join(_a , "keras_model.h5" )
model.save(_a )
__magic_name__ : Optional[int] = tf.keras.models.load_model(
_a , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_a , tf.keras.Model )
__magic_name__ : Optional[Any] = model(_a )
self.assert_outputs_same(_a , _a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : int = model_class(_a )
__magic_name__ : Tuple = self._prepare_for_class(_a , _a )
__magic_name__ : List[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ : Optional[int] = outputs.last_hidden_state.numpy()
__magic_name__ : int = 0
else:
__magic_name__ : List[Any] = outputs.logits.numpy()
__magic_name__ : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a , saved_model=_a )
__magic_name__ : List[str] = model_class.from_pretrained(_a )
__magic_name__ : Optional[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
__magic_name__ : int = after_outputs["last_hidden_state"].numpy()
__magic_name__ : str = 0
else:
__magic_name__ : Any = after_outputs["logits"].numpy()
__magic_name__ : List[str] = 0
__magic_name__ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
def SCREAMING_SNAKE_CASE ( self ):
# make mask reproducible
np.random.seed(2 )
__magic_name__ , __magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = int((config.image_size // config.patch_size) ** 2 )
__magic_name__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(_a )
__magic_name__ : List[str] = self._prepare_for_class(_a , _a )
__magic_name__ : Any = model(_a , noise=_a )
__magic_name__ : Optional[Any] = model.get_config()
# make sure that the returned config is jsonifiable, which is required by keras
json.dumps(_a )
__magic_name__ : Optional[int] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__magic_name__ : Optional[Any] = model_class.from_config(model.config )
__magic_name__ : Tuple = new_model(_a ) # Build model
new_model.set_weights(model.get_weights() )
__magic_name__ : List[str] = new_model(_a , noise=_a )
self.assert_outputs_same(_a , _a )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Dict = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ) -> Tuple:
'''simple docstring'''
__magic_name__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self ):
# make the random mask reproducible across the PT and TF models
np.random.seed(2 )
__magic_name__ : Dict = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
__magic_name__ : str = self.default_image_processor
__magic_name__ : int = prepare_img()
__magic_name__ : Union[str, Any] = image_processor(images=_a , return_tensors="tf" )
# prepare a noise vector that will also be used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__magic_name__ : Optional[int] = ViTMAEConfig()
__magic_name__ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__magic_name__ : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
__magic_name__ : Tuple = model(**_a , noise=_a )
# verify the logits
__magic_name__ : str = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _a )
__magic_name__ : Union[str, Any] = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _a , atol=1e-4 )
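# Editor's note (illustrative sketch, not part of the original test file): the MAE
# tests above keep masking reproducible by seeding NumPy and passing an explicit
# `noise` tensor of shape (batch_size, num_patches); a standalone version of that
# setup, with batch_size/image_size/patch_size chosen only for illustration:
import numpy as np
np.random.seed(2)  # fixed seed, so every forward pass sees the same mask
batch_size, image_size, patch_size = 2, 224, 16
num_patches = (image_size // patch_size) ** 2  # 196 patches for a 224/16 ViT
noise = np.random.uniform(size=(batch_size, num_patches))  # one score per patch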
| 41 | 0 |
from sklearn.metrics import fa_score
import datasets
_lowerCamelCase : Dict = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_lowerCamelCase : Any = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_lowerCamelCase : List[Any] = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''')),
'''references''': datasets.Sequence(datasets.Value('''int32''')),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=1 , UpperCAmelCase__ : Optional[int]="binary" , UpperCAmelCase__ : str=None) ->Optional[Any]:
'''simple docstring'''
A__ = fa_score(
UpperCAmelCase__ , UpperCAmelCase__ , labels=UpperCAmelCase__ , pos_label=UpperCAmelCase__ , average=UpperCAmelCase__ , sample_weight=UpperCAmelCase__)
return {"f1": float(UpperCAmelCase__) if score.size == 1 else score}
| 14 |
_lowerCamelCase : Optional[int] = 65521
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
A__ = 1
A__ = 0
for plain_chr in plain_text:
A__ = (a + ord(lowercase_ )) % MOD_ADLER
A__ = (b + a) % MOD_ADLER
return (b << 16) | a
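# Editor's note (illustrative, not part of the original file): with the module
# constant bound as MOD_ADLER = 65521 (the largest prime below 2**16), the function
# above is the classic Adler-32 checksum and agrees with zlib's implementation:
import zlib
assert zlib.adler32(b"Wikipedia") == 300286872  # well-known reference value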
| 14 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case ( __lowerCAmelCase ):
a__ = """new-model"""
if is_tf_available():
class __snake_case ( __lowerCAmelCase ):
a__ = NewModelConfig
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: str = 'bert-base-cased'
a__: str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: int = TFAutoModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Tuple = 'bert-base-cased'
a__: Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: int = TFAutoModelForPreTraining.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Tuple = TFAutoModelForCausalLM.from_pretrained(lowercase)
a__ , a__: List[str] = TFAutoModelForCausalLM.from_pretrained(lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Dict = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: int = TFAutoModelWithLMHead.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: int = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase)
a__ , a__: Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase)
a__ , a__: Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__: Any = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__: Optional[int] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
@require_tensorflow_probability
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
a__: Dict = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__: Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowercase)
a__ , a__: Optional[int] = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowercase , output_loading_info=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: int = TFAutoModelWithLMHead.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_44_10)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_44_10)
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
a__: Union[str, Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
self.assertIsInstance(lowercase , lowercase)
a__: Tuple = copy.deepcopy(model.config)
a__: int = ['FunnelBaseModel']
a__: List[str] = TFAutoModel.from_config(lowercase)
self.assertIsInstance(lowercase , lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase)
a__: Optional[int] = TFAutoModel.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
try:
AutoConfig.register('new-model' , lowercase)
a__: List[Any] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(lowercase):
auto_class.register(lowercase , lowercase)
auto_class.register(lowercase , lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase):
auto_class.register(lowercase , lowercase)
# Now that the config is registered, it can be used as any other config with the auto-API
a__: Any = BertModelTester(self).get_config()
a__: Tuple = NewModelConfig(**tiny_config.to_dict())
a__: Tuple = auto_class.from_config(lowercase)
self.assertIsInstance(lowercase , lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase)
a__: Dict = auto_class.from_pretrained(lowercase)
self.assertIsInstance(lowercase , lowercase)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'bert-base is not a local folder and is not a valid model identifier'):
a__: List[str] = TFAutoModel.from_pretrained('bert-base')
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
a__: Optional[int] = TFAutoModel.from_pretrained(lowercase , revision='aaaaaa')
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
with self.assertRaisesRegex(
lowercase , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
a__: Dict = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
with self.assertRaisesRegex(lowercase , 'Use `from_pt=True` to load this model'):
a__: Tuple = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
a__: Union[str, Any] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
a__: Dict = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
with RequestCounter() as counter:
a__: Optional[Any] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
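# Editor's note (minimal runnable sketch, not part of the original tests): the core of
# the register/cleanup dance exercised in the registration test above, shown with
# AutoConfig alone; the "my-new-model" name is an assumption for illustration.
from transformers import CONFIG_MAPPING, AutoConfig, PretrainedConfig

class MyNewConfig(PretrainedConfig):
    model_type = "my-new-model"

try:
    AutoConfig.register("my-new-model", MyNewConfig)
    assert isinstance(AutoConfig.for_model("my-new-model"), MyNewConfig)
finally:
    # mirror the test's cleanup so repeated runs do not collide
    if "my-new-model" in CONFIG_MAPPING._extra_content:
        del CONFIG_MAPPING._extra_content["my-new-model"]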
| 203 | """simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = Dict[str, Any]
lowercase__ = List[Prediction]
@add_end_docstrings(__lowerCAmelCase )
class __snake_case ( __lowerCAmelCase ):
def __init__( self , *lowercase , **lowercase) -> Dict:
'''simple docstring'''
super().__init__(*lowercase , **lowercase)
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.')
requires_backends(self , 'vision')
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def lowerCamelCase_ ( self , **lowercase) -> int:
'''simple docstring'''
a__: Optional[Any] = {}
if "threshold" in kwargs:
a__: Dict = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *lowercase , **lowercase) -> Union[Predictions, List[Prediction]]:
'''simple docstring'''
return super().__call__(*lowercase , **lowercase)
def lowerCamelCase_ ( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = load_image(lowercase)
a__: List[Any] = torch.IntTensor([[image.height, image.width]])
a__: Any = self.image_processor(images=[image] , return_tensors='pt')
if self.tokenizer is not None:
a__: Any = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt')
a__: List[str] = target_size
return inputs
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
a__: Any = model_inputs.pop('target_size')
a__: Union[str, Any] = self.model(**lowercase)
a__: List[str] = outputs.__class__({'target_size': target_size, **outputs})
if self.tokenizer is not None:
a__: Union[str, Any] = model_inputs['bbox']
return model_outputs
def lowerCamelCase_ ( self , lowercase , lowercase=0.9) -> Optional[Any]:
'''simple docstring'''
a__: int = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
a__ , a__: str = target_size[0].tolist()
def unnormalize(lowercase):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
]))
a__ , a__: Optional[Any] = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
a__: str = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
a__: Union[str, Any] = [unnormalize(lowercase) for bbox in model_outputs['bbox'].squeeze(0)]
a__: Dict = ['score', 'label', 'box']
a__: Any = [dict(zip(lowercase , lowercase)) for vals in zip(scores.tolist() , lowercase , lowercase) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
a__: List[str] = self.image_processor.post_process_object_detection(lowercase , lowercase , lowercase)
a__: Tuple = raw_annotations[0]
a__: List[str] = raw_annotation['scores']
a__: int = raw_annotation['labels']
a__: int = raw_annotation['boxes']
a__: List[Any] = scores.tolist()
a__: Any = [self.model.config.idalabel[label.item()] for label in labels]
a__: Dict = [self._get_bounding_box(lowercase) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
a__: Optional[Any] = ['score', 'label', 'box']
a__: List[Any] = [
dict(zip(lowercase , lowercase))
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'])
]
return annotation
def lowerCamelCase_ ( self , lowercase) -> Dict[str, int]:
'''simple docstring'''
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
a__ , a__ , a__ , a__: List[Any] = box.int().tolist()
a__: Optional[int] = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
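# Editor's note (illustrative, not part of the original file): LayoutLM-style boxes
# are normalized to a 0-1000 grid, which is why unnormalize() above scales by
# width / 1000 and height / 1000. For box [250, 500, 750, 1000] on an 800x600 image:
#   xmin = 800 * 250 / 1000 = 200.0
#   ymin = 600 * 500 / 1000 = 300.0
#   xmax = 800 * 750 / 1000 = 600.0
#   ymax = 600 * 1000 / 1000 = 600.0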
| 203 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self: Optional[int] , UpperCamelCase__: List[str] , UpperCamelCase__: Any=13 , UpperCamelCase__: List[str]=30 , UpperCamelCase__: Optional[Any]=2 , UpperCamelCase__: List[Any]=3 , UpperCamelCase__: int=True , UpperCamelCase__: Union[str, Any]=True , UpperCamelCase__: Union[str, Any]=32 , UpperCamelCase__: Tuple=2 , UpperCamelCase__: List[Any]=4 , UpperCamelCase__: str=37 , UpperCamelCase__: str="gelu" , UpperCamelCase__: Tuple=0.1 , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: Optional[int]=10 , UpperCamelCase__: List[Any]=0.02 , UpperCamelCase__: Dict=3 , UpperCamelCase__: Optional[int]=None , ):
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : Tuple = image_size
lowerCamelCase__ : Tuple = patch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : Optional[int] = use_labels
lowerCamelCase__ : Optional[int] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : int = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Union[str, Any] = hidden_act
lowerCamelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase__ : Dict = type_sequence_label_size
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : Optional[Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[Any] = num_patches + 1
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : int = None
if self.use_labels:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self: Union[str, Any] ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Any , UpperCamelCase__: Dict ):
lowerCamelCase__ : str = TFViTModel(config=UpperCamelCase__ )
lowerCamelCase__ : Dict = model(UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
lowerCamelCase__ : Tuple = self.image_size // 2
lowerCamelCase__ : Dict = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase__ : Dict = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ , training=UpperCamelCase__ )
lowerCamelCase__ : int = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ):
lowerCamelCase__ : List[str] = self.type_sequence_label_size
lowerCamelCase__ : Optional[Any] = TFViTForImageClassification(UpperCamelCase__ )
lowerCamelCase__ : Tuple = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
lowerCamelCase__ : str = self.image_size // 2
lowerCamelCase__ : str = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase__ : str = model(UpperCamelCase__ , interpolate_pos_encoding=UpperCamelCase__ , training=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : Optional[Any] = 1
lowerCamelCase__ : Tuple = TFViTForImageClassification(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : str = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = config_and_inputs
lowerCamelCase__ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
a = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
a = False
a = False
a = False
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[str] = TFViTModelTester(self )
lowerCamelCase__ : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase_ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase_ ( self: int ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase__ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = model_class(UpperCamelCase__ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : List[str] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
lowerCamelCase__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self: Any ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : Optional[Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=UpperCamelCase__ , return_tensors="""tf""" )
# forward pass
lowerCamelCase__ : Union[str, Any] = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase__ : int = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase__ : Dict = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
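# Editor's note (illustrative, not part of the original file): with the tester
# defaults above (image_size=30, patch_size=2) the model sees (30 // 2) ** 2 = 225
# patches, so seq_length = 225 + 1 = 226 once the [CLS] token is prepended.
assert (30 // 2) ** 2 + 1 == 226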
| 41 |
"""simple docstring"""
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Any:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
if n == 0:
return 0
__A : int = float('''-inf''' )
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max(
snake_case_ ,prices[i - 1] + naive_cut_rod_recursive(n - i ,snake_case_ ) )
return max_revue
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->int:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
__A : Dict = [float('''-inf''' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_ ,snake_case_ ,snake_case_ )
def __lowercase ( snake_case_ : int ,snake_case_ : list ,snake_case_ : list ) ->Any:
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__A : Any = float('''-inf''' )
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max(
snake_case_ ,prices[i - 1] + _top_down_cut_rod_recursive(n - i ,snake_case_ ,snake_case_ ) ,)
__A : Any = max_revenue
return max_rev[n]
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Any:
'''simple docstring'''
_enforce_args(snake_case_ ,snake_case_ )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
__A : Union[str, Any] = [float('''-inf''' ) for _ in range(n + 1 )]
__A : List[Any] = 0
for i in range(1 ,n + 1 ):
__A : Union[str, Any] = max_rev[i]
for j in range(1 ,i + 1 ):
__A : str = max(snake_case_ ,prices[j - 1] + max_rev[i - j] )
__A : List[str] = max_revenue_i
return max_rev[n]
def __lowercase ( snake_case_ : int ,snake_case_ : list ) ->Union[str, Any]:
'''simple docstring'''
if n < 0:
__A : Union[str, Any] = F"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
__A : List[Any] = (
'''Each integral piece of rod must have a corresponding price. '''
F"""Got n = {n} but length of prices = {len(snake_case_ )}"""
)
raise ValueError(snake_case_ )
def __lowercase ( ) ->str:
'''simple docstring'''
__A : Any = [6, 10, 12, 15, 20, 23]
__A : Union[str, Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1, resulting in a revenue of 6 * 6 = 36.
__A : str = 36
__A : Any = top_down_cut_rod(snake_case_ ,snake_case_ )
__A : Any = bottom_up_cut_rod(snake_case_ ,snake_case_ )
__A : Optional[Any] = naive_cut_rod_recursive(snake_case_ ,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
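# Editor's note (illustrative worked example, not part of the original file): for
# prices = [1, 5, 8, 9] the bottom-up table fills as max_rev = [0, 1, 5, 8, 10];
# e.g. the final entry is
#   max(1 + max_rev[3], 5 + max_rev[2], 8 + max_rev[1], 9 + max_rev[0])
#   = max(1 + 8, 5 + 5, 8 + 1, 9 + 0) = 10,
# i.e. a rod of length 4 is best sold as two pieces of length 2.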
| 179 | 0 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If, when running this conversion script, you get an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to point at the existing clone
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=0 ) ->Dict:
"""simple docstring"""
if name is None:
lowercase : Optional[int] = None
else:
lowercase : Optional[int] = '''.''' * max(0, spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}'''
lowercase : Optional[Any] = fmt.format(_UpperCamelCase )
# Print and recurse (if needed).
if isinstance(_UpperCamelCase, _UpperCamelCase ):
if msg is not None:
print(_UpperCamelCase )
for k in val.keys():
recursive_print(_UpperCamelCase, val[k], spaces + 2 )
elif isinstance(_UpperCamelCase, torch.Tensor ):
print(_UpperCamelCase, ''':''', val.size() )
else:
print(_UpperCamelCase, ''':''', _UpperCamelCase )
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[Any]:
"""simple docstring"""
lowercase : Dict = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowercase : Optional[int] = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowercase : List[Any] = param.view(*_UpperCamelCase )
lowercase : List[Any] = param.transpose(0, 2 )
lowercase : Any = param.transpose(1, 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowercase : Dict = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowercase : Optional[int] = param.view(*_UpperCamelCase )
lowercase : str = param.transpose(0, 1 ).contiguous()
lowercase : Union[str, Any] = param.view(*_UpperCamelCase )
return param
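# Editor's note (illustrative shape trace, not part of the original file): for
# checkpoint_version >= 2.0 a fused QKV weight is stored heads-major, i.e.
# [num_heads * num_splits * hidden_size, :]; the view/transpose above reorders it to
# splits-major, [num_splits * num_heads * hidden_size, :] (all of Q, then K, then V),
# which is the layout the transformers GPT-2 attention expects.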
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Any:
"""simple docstring"""
lowercase : List[str] = {}
# old versions did not store training args
lowercase : int = input_state_dict.get('''args''', _UpperCamelCase )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase : str = ds_args.padded_vocab_size
lowercase : Tuple = ds_args.max_position_embeddings
lowercase : Union[str, Any] = ds_args.hidden_size
lowercase : Dict = ds_args.num_layers
lowercase : Optional[int] = ds_args.num_attention_heads
lowercase : Any = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase : Optional[Any] = config.n_head
# The hidden_size per head.
lowercase : List[Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase : Any = input_state_dict['''checkpoint_version''']
else:
lowercase : Dict = 0.0
# The model.
lowercase : List[str] = input_state_dict['''model''']
# The language model.
lowercase : Tuple = model['''language_model''']
# The embeddings.
lowercase : Any = lm['''embedding''']
# The word embeddings.
lowercase : List[str] = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
lowercase : Optional[int] = word_embeddings[: config.vocab_size, :]
lowercase : Any = word_embeddings
# The position embeddings.
lowercase : Optional[int] = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase : str = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
lowercase : Optional[int] = pos_embeddings
# The transformer.
lowercase : Tuple = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
lowercase : str = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' )
# The simple map of names for "automated" rules.
lowercase : Optional[int] = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
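# Editor's note (illustrative, not part of the original file): layer_re above splits a
# parameter name such as "layers.3.attention.dense.weight" into the groups
# ("3", "attention.dense", "weight"): layer index, operation name, weight-or-bias.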
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase : Tuple = layer_re.match(_UpperCamelCase )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase : Union[str, Any] = int(m.group(1 ) )
# The name of the operation.
lowercase : Dict = m.group(2 )
# Is it a weight or a bias?
lowercase : Optional[Any] = m.group(3 )
# The name of the layer.
lowercase : List[Any] = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm''' ):
lowercase : str = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2'''
lowercase : Tuple = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase : int = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.floataa ) ).view(
1, 1, _UpperCamelCase, _UpperCamelCase )
lowercase : int = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase : Any = torch.tensor(-1e4, dtype=torch.floataa )
lowercase : Union[str, Any] = masked_bias
lowercase : Optional[Any] = fix_query_key_value_ordering(_UpperCamelCase, _UpperCamelCase, 3, _UpperCamelCase, _UpperCamelCase )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase : Optional[int] = out_val.transpose(0, 1 ).contiguous()
# Store.
lowercase : Optional[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase : List[str] = fix_query_key_value_ordering(_UpperCamelCase, _UpperCamelCase, 3, _UpperCamelCase, _UpperCamelCase )
# Store. No change of shape.
lowercase : int = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase : str = megatron_to_transformers[op_name]
lowercase : List[str] = val.transpose(0, 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase : str = megatron_to_transformers[op_name]
lowercase : int = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase : Optional[Any] = transformer['''final_layernorm.weight''']
lowercase : Dict = transformer['''final_layernorm.bias''']
# For the LM head, transformers wants the matrix tied to the word embeddings.
lowercase : int = word_embeddings
# It should be done!
return output_state_dict
def __lowercase ( ) ->Optional[Any]:
"""simple docstring"""
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--print-checkpoint-structure''', action='''store_true''' )
parser.add_argument(
'''path_to_checkpoint''', type=_UpperCamelCase, help='''Path to the checkpoint file (.zip archive or direct .pt file)''', )
parser.add_argument(
'''--config_file''', default='''''', type=_UpperCamelCase, help='''An optional config json file describing the pre-trained model.''', )
lowercase : int = parser.parse_args()
# Extract the basename.
lowercase : Optional[int] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith('''.zip''' ):
with zipfile.ZipFile(args.path_to_checkpoint, '''r''' ) as checkpoint:
with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict:
lowercase : str = torch.load(_UpperCamelCase, map_location='''cpu''' )
else:
lowercase : Optional[int] = torch.load(args.path_to_checkpoint, map_location='''cpu''' )
lowercase : int = input_state_dict.get('''args''', _UpperCamelCase )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowercase : Optional[Any] = '''gelu_fast'''
elif ds_args.openai_gelu:
lowercase : List[Any] = '''gelu_new'''
else:
lowercase : Any = '''gelu'''
else:
# in the very early days this used to be "gelu_new"
lowercase : Optional[Any] = '''gelu_new'''
# Spell out all parameters in case the defaults change.
lowercase : List[Any] = GPTaConfig(
vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=_UpperCamelCase, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.0_2, summary_type='''cls_index''', summary_use_proj=_UpperCamelCase, summary_activation=_UpperCamelCase, summary_proj_to_labels=_UpperCamelCase, summary_first_dropout=0.1, scale_attn_weights=_UpperCamelCase, use_cache=_UpperCamelCase, bos_token_id=50256, eos_token_id=50256, )
else:
lowercase : int = GPTaConfig.from_json_file(args.config_file )
lowercase : str = ['''GPT2LMHeadModel''']
# Convert.
print('''Converting''' )
lowercase : Optional[Any] = convert_megatron_checkpoint(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_UpperCamelCase, _UpperCamelCase )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
lowercase : Optional[int] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowercase : Optional[Any] = '''gpt2'''
elif tokenizer_type == "PretrainedFromHF":
lowercase : List[Any] = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
lowercase : Dict = '''gpt2'''
lowercase : Dict = AutoTokenizer.from_pretrained(_UpperCamelCase )
lowercase : Dict = type(_UpperCamelCase ).__name__
lowercase : List[Any] = tokenizer_class
# Store the config to file.
print('''Saving config''' )
config.save_pretrained(_UpperCamelCase )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_UpperCamelCase )
# Store the state_dict to file.
lowercase : Any = os.path.join(_UpperCamelCase, '''pytorch_model.bin''' )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_UpperCamelCase, _UpperCamelCase )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 361 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__a = 50_00_00
__a , __a = os.path.split(__file__)
__a = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def __lowercase ( _UpperCamelCase, **_UpperCamelCase ) ->Any:
"""simple docstring"""
lowercase : Optional[Any] = dataset.map(**_UpperCamelCase )
@get_duration
def __lowercase ( _UpperCamelCase, **_UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : int = dataset.filter(**_UpperCamelCase )
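# Editor's note (illustrative sketch, not the actual utils implementation):
# get_duration is imported from the local benchmark utils; a hypothetical decorator
# with the same apparent contract (discard the result, return elapsed seconds) is:
import functools
import time

def _timed(func):  # hypothetical stand-in mirroring get_duration
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper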
def __lowercase ( ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Dict = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase : Dict = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
lowercase : List[str] = generate_example_dataset(
os.path.join(_UpperCamelCase, '''dataset.arrow''' ), _UpperCamelCase, num_examples=_UpperCamelCase )
lowercase : List[Any] = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=_UpperCamelCase )
def tokenize(_UpperCamelCase ):
return tokenizer(examples['''text'''] )
lowercase : Union[str, Any] = map(_UpperCamelCase )
lowercase : Dict = map(_UpperCamelCase, batched=_UpperCamelCase )
lowercase : Tuple = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''numpy''' ):
lowercase : Dict = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''pandas''' ):
lowercase : Any = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''torch''', columns='''numbers''' ):
lowercase : str = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ):
lowercase : Tuple = map(_UpperCamelCase, function=lambda _UpperCamelCase : None, batched=_UpperCamelCase )
lowercase : List[str] = map(_UpperCamelCase, function=_UpperCamelCase, batched=_UpperCamelCase )
lowercase : Any = filter(_UpperCamelCase )
# Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_UpperCamelCase, '''wb''' ) as f:
f.write(json.dumps(_UpperCamelCase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 173 | 0 |
'''simple docstring'''
def __a(SCREAMING_SNAKE_CASE_ : List[Any] = 10**12 ):
'''simple docstring'''
_lowerCAmelCase = 1
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
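# Editor's note (illustrative, not part of the original file): the loop walks
# successive solutions (x, y) of the negative Pell equation x**2 - 2*y**2 = -1,
# i.e. (1, 1), (7, 5), (41, 29), (239, 169), ..., as numerator/denominator pairs,
# stopping once x = 2 * total - 1 exceeds 2 * min_total - 1; (y + 1) // 2 then
# recovers the requested count.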
| 158 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def lowerCAmelCase__(__snake_case ) -> int: # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def lowerCAmelCase__() -> Any:
'''simple docstring'''
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase__ = [1, 2, 3]
with pytest.raises(__snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(__snake_case ,__snake_case ,num_proc=2 )
with pytest.raises(__snake_case ):
with parallel_backend('''unsupported backend''' ):
map_nested(__snake_case ,__snake_case ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' ,[2, -1] )
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = [1, 2]
lowerCamelCase__ = {'''a''': 1, '''b''': 2}
lowerCamelCase__ = {'''a''': [1, 2], '''b''': [3, 4]}
lowerCamelCase__ = {'''a''': {'''1''': 1}, '''b''': 2}
lowerCamelCase__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowerCamelCase__ = [2, 3]
lowerCamelCase__ = {'''a''': 2, '''b''': 3}
lowerCamelCase__ = {'''a''': [2, 3], '''b''': [4, 5]}
lowerCamelCase__ = {'''a''': {'''1''': 2}, '''b''': 3}
lowerCamelCase__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case ,__snake_case ,num_proc=__snake_case ) == expected_map_nested_sa
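# Editor's note (illustrative, not part of the original file): map_nested applies the
# function leaf-wise through nested lists and dicts, which is what the expected
# values above encode, e.g. {"a": [1, 2], "b": [3, 4]} -> {"a": [2, 3], "b": [4, 5]}
# when mapping the +1 helper, regardless of num_proc.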
| 209 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
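# Editor's note (illustrative sketch, not the actual _LazyModule implementation): the
# pattern above defers the heavy torch imports until an attribute is first accessed,
# roughly like this hypothetical class:
import importlib

class _LazyModuleSketch:  # hypothetical, for illustration only
    def __init__(self, name, import_structure):
        self._name = name
        self._structure = import_structure

    def __getattr__(self, attr):
        for submodule, members in self._structure.items():
            if attr in members:
                module = importlib.import_module(f".{submodule}", self._name)
                return getattr(module, attr)
        raise AttributeError(attr)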
| 103 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : int = ["input_values", "attention_mask"]
def __init__( self, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 1_6000, SCREAMING_SNAKE_CASE_ = 0.0, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = 80, SCREAMING_SNAKE_CASE_ = 16, SCREAMING_SNAKE_CASE_ = 64, SCREAMING_SNAKE_CASE_ = "hann_window", SCREAMING_SNAKE_CASE_ = 1.0, SCREAMING_SNAKE_CASE_ = 80, SCREAMING_SNAKE_CASE_ = 7600, SCREAMING_SNAKE_CASE_ = 1e-10, SCREAMING_SNAKE_CASE_ = 2, SCREAMING_SNAKE_CASE_ = True, **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
super().__init__(feature_size=SCREAMING_SNAKE_CASE_, sampling_rate=SCREAMING_SNAKE_CASE_, padding_value=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = do_normalize
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Union[str, Any] = num_mel_bins
UpperCamelCase : int = hop_length
UpperCamelCase : Any = win_length
UpperCamelCase : Dict = win_function
UpperCamelCase : Any = frame_signal_scale
UpperCamelCase : str = fmin
UpperCamelCase : int = fmax
UpperCamelCase : Dict = mel_floor
UpperCamelCase : Any = reduction_factor
UpperCamelCase : List[str] = win_length * sampling_rate // 1000
UpperCamelCase : Union[str, Any] = hop_length * sampling_rate // 1000
UpperCamelCase : Tuple = optimal_fft_length(self.sample_size )
UpperCamelCase : int = (self.n_fft // 2) + 1
UpperCamelCase : Any = window_function(window_length=self.sample_size, name=self.win_function, periodic=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm='slaney', mel_scale='slaney', )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers', SCREAMING_SNAKE_CASE_, )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers', SCREAMING_SNAKE_CASE_, )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def snake_case_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
UpperCamelCase : Dict = np.array(SCREAMING_SNAKE_CASE_, np.intaa )
UpperCamelCase : int = []
for vector, length in zip(SCREAMING_SNAKE_CASE_, attention_mask.sum(-1 ) ):
UpperCamelCase : Any = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase : Optional[Any] = padding_value
normed_input_values.append(SCREAMING_SNAKE_CASE_ )
else:
UpperCamelCase : str = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
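# Editor's note (illustrative, not part of the original file): the normalization
# above maps each vector to zero mean and unit variance over its valid length; e.g.
#   x = [1.0, 2.0, 3.0]
#   (x - x.mean()) / np.sqrt(x.var() + 1e-7) -> approximately [-1.2247, 0.0, 1.2247]
# and any padded tail beyond `length` is overwritten with `padding_value`.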
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
UpperCamelCase : int = spectrogram(
SCREAMING_SNAKE_CASE_, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel='log10', )
return log_mel_spec.T
def __call__( self, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
UpperCamelCase : Dict = self._process_audio(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
else:
UpperCamelCase : str = None
if audio_target is not None:
UpperCamelCase : str = self._process_audio(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
if inputs is None:
return inputs_target
else:
UpperCamelCase : Dict = inputs_target['input_values']
UpperCamelCase : str = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
UpperCamelCase : str = decoder_attention_mask
return inputs
def _process_audio( self, speech, is_target = False, padding = False, max_length = None, truncation = False, pad_to_multiple_of = None, return_attention_mask = None, return_tensors = None, **kwargs, ) -> BatchFeature:
UpperCamelCase : Any = isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Any = is_batched_numpy or (
isinstance(SCREAMING_SNAKE_CASE_, (list, tuple) ) and (isinstance(speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ):
UpperCamelCase : int = np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.floataa )
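# NOTE: upstream Wav2Vec2 targets float64 arrays in this branch and downcasts them to float32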
elif isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : Any = speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : str = [speech]
# needed to make pad() work on spectrogram inputs
UpperCamelCase : Optional[Any] = self.feature_size
# convert into correct format for padding
if is_target:
UpperCamelCase : Optional[int] = [self._extract_mel_features(SCREAMING_SNAKE_CASE_ ) for waveform in speech]
UpperCamelCase : Union[str, Any] = BatchFeature({'input_values': features} )
UpperCamelCase : List[str] = self.num_mel_bins
else:
UpperCamelCase : Dict = BatchFeature({'input_values': speech} )
UpperCamelCase : Tuple = self.pad(
SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, pad_to_multiple_of=SCREAMING_SNAKE_CASE_, return_attention_mask=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCamelCase : int = feature_size_hack
# convert input values to correct format
UpperCamelCase : Optional[int] = padded_inputs['input_values']
if not isinstance(input_values[0], np.ndarray ):
UpperCamelCase : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.floataa ) for array in input_values]
elif (
not isinstance(SCREAMING_SNAKE_CASE_, np.ndarray )
and isinstance(input_values[0], np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
UpperCamelCase : Optional[int] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(SCREAMING_SNAKE_CASE_, np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
UpperCamelCase : Dict = input_values.astype(np.floataa )
# convert attention_mask to correct format
UpperCamelCase : Dict = padded_inputs.get('attention_mask' )
if attention_mask is not None:
UpperCamelCase : int = [np.asarray(SCREAMING_SNAKE_CASE_, dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
UpperCamelCase : Dict = (
attention_mask
if self._get_padding_strategies(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
UpperCamelCase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['input_values'], attention_mask=SCREAMING_SNAKE_CASE_, padding_value=self.padding_value )
if return_tensors is not None:
UpperCamelCase : int = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return padded_inputs
def to_dict( self ) -> Dict[str, Any]:
UpperCamelCase : Any = super().to_dict()
# Don't serialize these as they are derived from the other properties.
UpperCamelCase : Any = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
| 103 | 1 |
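A minimal NumPy sketch of the zero-mean unit-variance normalization the extractor applies above; the waveform values and real-sample count are invented for illustration.

import numpy as np

# hypothetical padded waveform: four real samples followed by two padding zeros
x = np.array([1.0, 2.0, 3.0, 4.0, 0.0, 0.0], dtype=np.float32)
length = 4  # number of real samples, as an attention mask would report

# normalize with statistics computed over the real samples only
normed = (x - x[:length].mean()) / np.sqrt(x[:length].var() + 1e-7)
normed[length:] = 0.0  # restore the padding value afterwards
print(normed)  # the first four entries now have zero mean and unit variance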
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def rename_keys( state_dict ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
__UpperCAmelCase : str = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
__UpperCAmelCase : Union[str, Any] = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__UpperCAmelCase : List[Any] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
__UpperCAmelCase : Dict = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(lowerCAmelCase__ )-1}' )
if "norm" in key:
__UpperCAmelCase : Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__UpperCAmelCase : Any = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
__UpperCAmelCase : Union[str, Any] = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(lowerCAmelCase__ )-1}' )
if "layer_norm1" in key:
__UpperCAmelCase : Dict = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
__UpperCAmelCase : Union[str, Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
__UpperCAmelCase : List[Any] = key[key.find("""block""" ) + len("""block""" )]
__UpperCAmelCase : Dict = key.replace(f'block{idx}' , f'block.{int(lowerCAmelCase__ )-1}' )
if "attn.q" in key:
__UpperCAmelCase : Any = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
__UpperCAmelCase : Union[str, Any] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
__UpperCAmelCase : str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
__UpperCAmelCase : Optional[int] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
__UpperCAmelCase : List[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
__UpperCAmelCase : int = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
__UpperCAmelCase : Optional[int] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
__UpperCAmelCase : Dict = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__UpperCAmelCase : Tuple = key[key.find("""linear_c""" ) + len("""linear_c""" )]
__UpperCAmelCase : Any = key.replace(f'linear_c{idx}' , f'linear_c.{int(lowerCAmelCase__ )-1}' )
if "bot_conv" in key:
__UpperCAmelCase : Dict = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
__UpperCAmelCase : Optional[int] = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
__UpperCAmelCase : Tuple = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
__UpperCAmelCase : List[str] = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
__UpperCAmelCase : str = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
__UpperCAmelCase : Tuple = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
__UpperCAmelCase : List[str] = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
__UpperCAmelCase : str = key.replace("""module.last_layer_depth""" , """head.head""" )
__UpperCAmelCase : List[Any] = value
return new_state_dict
def read_in_k_v( state_dict , config ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__UpperCAmelCase : Optional[int] = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
__UpperCAmelCase : str = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
__UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
__UpperCAmelCase : List[Any] = kv_bias[: config.hidden_sizes[i]]
__UpperCAmelCase : List[str] = kv_weight[
config.hidden_sizes[i] :, :
]
__UpperCAmelCase : Any = kv_bias[config.hidden_sizes[i] :]
def prepare_img( ):
"""simple docstring"""
__UpperCAmelCase : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__UpperCAmelCase : Optional[int] = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
"""simple docstring"""
__UpperCAmelCase : List[str] = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
__UpperCAmelCase : Dict = GLPNImageProcessor()
# prepare image
__UpperCAmelCase : Union[str, Any] = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
__UpperCAmelCase : Dict = torch.load(lowerCAmelCase__ , map_location=torch.device("""cpu""" ) )
# rename keys
__UpperCAmelCase : str = rename_keys(lowerCAmelCase__ )
# key and value matrices need special treatment
read_in_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# create HuggingFace model and load state dict
__UpperCAmelCase : List[str] = GLPNForDepthEstimation(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
model.eval()
# forward pass
__UpperCAmelCase : Dict = model(lowerCAmelCase__ )
__UpperCAmelCase : Dict = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
__UpperCAmelCase : Any = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
__UpperCAmelCase : List[str] = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(f'Unknown model name: {model_name}' )
__UpperCAmelCase : Tuple = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowerCAmelCase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowerCAmelCase__ , lowerCAmelCase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowerCAmelCase__ , )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
_UpperCamelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 254 |
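A minimal sketch of the fused key/value split performed by read_in_k_v above: the original checkpoint stores one stacked [key; value] matrix per block, and the converter slices it in half along the output dimension. The hidden size here is invented.

import torch

hidden = 4                                   # hypothetical per-stage hidden size
kv_weight = torch.randn(2 * hidden, hidden)  # fused [key; value] projection

key_weight = kv_weight[:hidden, :]           # first half -> key projection
value_weight = kv_weight[hidden:, :]         # second half -> value projection
assert torch.equal(torch.cat([key_weight, value_weight]), kv_weight)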
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly( poly: Sequence[float] , x: float ):
    """simple docstring"""
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly: Sequence[float] , x: float ):
    """simple docstring"""
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
_UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0)
_UpperCamelCase = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 254 | 1 |
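With the sample inputs above, poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0, direct evaluation gives 5·10^2 + 9.3·10^3 + 7·10^4 = 500 + 9300 + 70000 = 79800 (up to float rounding). Horner's scheme reaches the same value with one multiply and one add per coefficient instead of repeated exponentiation:

# Horner folding of (0.0, 0.0, 5.0, 9.3, 7.0) at x = 10.0, highest degree first:
#    0 * 10 + 7.0 =     7.0
#  7.0 * 10 + 9.3 =    79.3
# 79.3 * 10 + 5.0 =   798.0
#  798 * 10 + 0.0 =  7980.0
# 7980 * 10 + 0.0 = 79800.0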
import math
import random
def sigmoid_function( value: float , deriv: bool = False ) -> float:
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCAmelCase__ :Optional[Any] = 0.02
def forward_propagation( expected: int , number_propagations: int ) -> float:
    '''simple docstring'''
    weight = float(2 * (random.randint(1 , 1_0_0 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_0_0) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 1_0_0
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ :List[Any] = int(input('''Expected value: '''))
lowerCAmelCase__ :Any = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 185 |
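The deriv branch above relies on the identity σ'(z) = σ(z)·(1 − σ(z)), which is why the training loop feeds the already-activated layer output back into the function. A quick numerical check of the identity:

import math

z = 0.5
s = 1 / (1 + math.exp(-z))              # sigmoid(z)
analytic = s * (1 - s)                  # derivative via the identity
h = 1e-6                                # finite-difference step
numeric = (1 / (1 + math.exp(-(z + h))) - s) / h
print(abs(analytic - numeric) < 1e-5)   # True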
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
    '''simple docstring'''
    url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
        ] )
    image = transform(image ).unsqueeze(0 ).to(device )
    return image
def rename_key( key ):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*' , 'vision_model.encoder' , key )
    if "blocks" in key:
        key = re.sub(R'blocks' , 'layers' , key )
    if "attn" in key:
        key = re.sub(R'attn' , 'self_attn' , key )
    if "norm1" in key:
        key = re.sub(R'norm1' , 'layer_norm1' , key )
    if "norm2" in key:
        key = re.sub(R'norm2' , 'layer_norm2' , key )
    if "encoder.norm" in key:
        key = re.sub(R'encoder.norm' , 'post_layernorm' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , key )
    if "encoder.cls_token" in key:
        key = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , key )
    if "self_attn" in key:
        key = re.sub(R'self_attn.proj' , 'self_attn.projection' , key )
    return key
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ) -> Optional[Any]:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = BlipConfig.from_pretrained(a__ )
else:
_UpperCAmelCase = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
_UpperCAmelCase = BlipForConditionalGeneration(a__ ).eval()
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
_UpperCAmelCase = blip_decoder(pretrained=a__ , image_size=3_8_4 , vit='base' )
_UpperCAmelCase = pt_model.eval()
_UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(a__ )
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = value
hf_model.load_state_dict(a__ )
_UpperCAmelCase = 3_8_4
_UpperCAmelCase = load_demo_image(image_size=a__ , device='cpu' )
_UpperCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
_UpperCAmelCase = tokenizer(['a picture of'] ).input_ids
_UpperCAmelCase = hf_model.generate(a__ , a__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
_UpperCAmelCase = hf_model.generate(a__ )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(a__ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
_UpperCAmelCase = blip_vqa(pretrained=a__ , image_size=a__ , vit='base' )
vqa_model.eval()
_UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(a__ )
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForQuestionAnswering(a__ )
hf_vqa_model.load_state_dict(a__ )
_UpperCAmelCase = ['How many dogs are in this image?']
_UpperCAmelCase = tokenizer(a__ , return_tensors='pt' ).input_ids
_UpperCAmelCase = hf_vqa_model.generate(a__ , a__ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
_UpperCAmelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
_UpperCAmelCase = blip_itm(pretrained=a__ , image_size=a__ , vit='base' )
itm_model.eval()
_UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(a__ )
_UpperCAmelCase = rename_key(a__ )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForImageTextRetrieval(a__ )
_UpperCAmelCase = ['A picture of a woman with a dog sitting in a beach']
_UpperCAmelCase = tokenizer(
a__ , return_tensors='pt' , padding='max_length' , truncation=a__ , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(a__ )
hf_itm_model.eval()
_UpperCAmelCase = hf_itm_model(a__ , a__ , use_itm_head=a__ )
_UpperCAmelCase = hf_itm_model(a__ , a__ , use_itm_head=a__ )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
lowerCAmelCase__ :int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase__ :List[Any] = parser.parse_args()
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 185 | 1 |
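The pop/rename/reassign loop used three times above is a standard way to migrate a state dict in place; a minimal standalone sketch with a toy dict and a single illustrative rename rule:

def rename(key: str) -> str:
    # toy rule standing in for the regex chain above
    return key.replace("attn", "self_attn")

state_dict = {"blocks.0.attn.weight": 1, "blocks.0.mlp.weight": 2}
for key in list(state_dict):  # iterate over a snapshot so mutation is safe
    state_dict[rename(key)] = state_dict.pop(key)
print(state_dict)  # {'blocks.0.self_attn.weight': 1, 'blocks.0.mlp.weight': 2}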
from ..utils import DummyObject, requires_backends
class a (metaclass=DummyObject ):
"""simple docstring"""
__UpperCAmelCase : str = ["flax", "transformers"]
def __init__( self : int , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> int:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : Union[str, Any] , *lowerCamelCase : List[str] , **lowerCamelCase : int ) -> Union[str, Any]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : Optional[Any] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : str ) -> str:
requires_backends(cls , ["flax", "transformers"] )
class a (metaclass=DummyObject ):
"""simple docstring"""
__UpperCAmelCase : int = ["flax", "transformers"]
def __init__( self : List[str] , *lowerCamelCase : Dict , **lowerCamelCase : Dict ) -> List[Any]:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : List[Any] , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> Any:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : str , *lowerCamelCase : Tuple , **lowerCamelCase : Dict ) -> str:
requires_backends(cls , ["flax", "transformers"] )
class a (metaclass=DummyObject ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ["flax", "transformers"]
def __init__( self : Optional[Any] , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> Optional[Any]:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : Any , *lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] ) -> List[Any]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : Optional[int] , *lowerCamelCase : Optional[Any] , **lowerCamelCase : List[str] ) -> int:
requires_backends(cls , ["flax", "transformers"] )
class a (metaclass=DummyObject ):
"""simple docstring"""
__UpperCAmelCase : str = ["flax", "transformers"]
def __init__( self : List[Any] , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> Optional[int]:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : Dict , *lowerCamelCase : Tuple , **lowerCamelCase : str ) -> Optional[Any]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def __snake_case ( cls : List[str] , *lowerCamelCase : Tuple , **lowerCamelCase : Optional[Any] ) -> Optional[int]:
requires_backends(cls , ["flax", "transformers"] )
| 123 |
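These stubs follow the dummy-object pattern: importing the package without the optional backends succeeds, and a readable error is raised lazily only when one of the classes is actually touched. A from-scratch sketch of the idea (an illustration, not the library's DummyObject):

class LazyBackendError(type):
    """Metaclass: any attribute lookup on the class raises a helpful ImportError."""

    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the backends: {cls._backends}")

class FlaxPipelineStub(metaclass=LazyBackendError):
    _backends = ["flax", "transformers"]

# Importing FlaxPipelineStub is fine; using it is not:
# FlaxPipelineStub.from_pretrained("x")  -> ImportError naming the missing backends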
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_snake_case : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (DiffusionPipeline ):
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Dict:
super().__init__()
self.register_modules(
vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def enable_attention_slicing( self : Optional[Any] , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def disable_attention_slicing( self : str ) -> List[str]:
    self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self : Dict , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , lowerCamelCase : Optional[torch.FloatTensor] = None , **lowerCamelCase : Any , ) -> Optional[Any]:
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Optional[int] = 1
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : Tuple = len(lowerCamelCase )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase , lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(lowerCamelCase )}.' )
# get prompt text embeddings
__snake_case : Tuple = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
__snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__snake_case : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case : str = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : int = text_embeddings.shape
__snake_case : Any = text_embeddings.repeat(1 , lowerCamelCase , 1 )
__snake_case : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : Any = [""]
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !='
F' {type(lowerCamelCase )}.' )
elif isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : int = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
" the batch size of `prompt`." )
else:
__snake_case : Tuple = negative_prompt
__snake_case : str = text_input_ids.shape[-1]
__snake_case : Dict = self.tokenizer(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="pt" , )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : Tuple = uncond_embeddings.shape[1]
__snake_case : Any = uncond_embeddings.repeat(lowerCamelCase , lowerCamelCase , 1 )
__snake_case : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Optional[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Optional[Any] = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(self.device )
__snake_case : int = torch.randn(lowerCamelCase , generator=lowerCamelCase , device="cpu" , dtype=lowerCamelCase ).to(
self.device )
else:
__snake_case : Union[str, Any] = torch.randn(
lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__snake_case : int = torch.randn(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
__snake_case : Union[str, Any] = latents_reference.to(self.device )
__snake_case : List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case : Union[str, Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case : Union[str, Any] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case : str = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case : List[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case : Tuple = 0 if dx < 0 else dx
__snake_case : Union[str, Any] = 0 if dy < 0 else dy
__snake_case : Any = max(-dx , 0 )
__snake_case : Optional[int] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Dict = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Optional[Any] = {}
if accepts_eta:
__snake_case : List[Any] = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : List[Any] = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__snake_case : str = self.unet(lowerCamelCase , lowerCamelCase , encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : str = noise_pred.chunk(2 )
__snake_case : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase , lowerCamelCase , lowerCamelCase )
__snake_case : List[Any] = 1 / 0.1_82_15 * latents
__snake_case : Dict = self.vae.decode(lowerCamelCase ).sample
__snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) , return_tensors="pt" ).to(
self.device )
__snake_case , __snake_case : str = self.safety_checker(
images=lowerCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case : Dict = None
if output_type == "pil":
__snake_case : Any = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase , nsfw_content_detected=lowerCamelCase )
| 123 | 1 |
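The guidance step above combines the two halves of the batched prediction; a minimal sketch of the classifier-free guidance arithmetic on toy tensors:

import torch

noise_pred = torch.tensor([[0.1, 0.2], [0.5, 0.8]])  # [uncond; text] stacked on dim 0
guidance_scale = 7.5

uncond, text = noise_pred.chunk(2)
guided = uncond + guidance_scale * (text - uncond)
print(guided)  # tensor([[3.1000, 4.7000]])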
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionXLImgaImgPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
SCREAMING_SNAKE_CASE_ = PipelineTesterMixin.required_optional_params - {'latents'}
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
a = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,attention_head_dim=(2, 4) ,use_linear_projection=__lowerCamelCase ,addition_embed_type='''text_time''' ,addition_time_embed_dim=8 ,transformer_layers_per_block=(1, 2) ,projection_class_embeddings_input_dim=80 ,cross_attention_dim=64 ,)
a = EulerDiscreteScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,steps_offset=1 ,beta_schedule='''scaled_linear''' ,timestep_spacing='''leading''' ,)
torch.manual_seed(0 )
a = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act='''gelu''' ,projection_dim=32 ,)
a = CLIPTextModel(__lowerCamelCase )
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ,local_files_only=__lowerCamelCase )
a = CLIPTextModelWithProjection(__lowerCamelCase )
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ,local_files_only=__lowerCamelCase )
a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''text_encoder_2''': text_encoder_a,
'''tokenizer_2''': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def get_dummy_inputs( self : Optional[int] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int]=0 ):
'''simple docstring'''
a = floats_tensor((1, 3, 32, 32) ,rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
a = image / 2 + 0.5
if str(__lowerCamelCase ).startswith('''mps''' ):
a = torch.manual_seed(__lowerCamelCase )
else:
a = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
a = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 5.0,
'''output_type''': '''numpy''',
'''strength''': 0.75,
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
a = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a = self.get_dummy_components()
a = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
a = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
a = self.get_dummy_inputs(__lowerCamelCase )
a = sd_pipe(**__lowerCamelCase ).images
a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def test_attention_slicing_forward_pass( self : List[str] ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def test_inference_batch_single_identical( self : Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = self.get_dummy_components()
a = StableDiffusionXLImgaImgPipeline(**__lowerCamelCase )
a = sd_pipe.to(__lowerCamelCase )
a = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
# forward without prompt embeds
a = self.get_dummy_inputs(__lowerCamelCase )
a = 3 * ['''this is a negative prompt''']
a = negative_prompt
a = 3 * [inputs['''prompt''']]
a = sd_pipe(**__lowerCamelCase )
a = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
a = self.get_dummy_inputs(__lowerCamelCase )
a = 3 * ['''this is a negative prompt''']
a = 3 * [inputs.pop('''prompt''' )]
a , a , a , a = sd_pipe.encode_prompt(__lowerCamelCase ,negative_prompt=__lowerCamelCase )
a = sd_pipe(
**__lowerCamelCase ,prompt_embeds=__lowerCamelCase ,negative_prompt_embeds=__lowerCamelCase ,pooled_prompt_embeds=__lowerCamelCase ,negative_pooled_prompt_embeds=__lowerCamelCase ,)
a = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def tearDown( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs( self : List[Any] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any]="cpu" ,__lowerCamelCase : Union[str, Any]=torch.floataa ,__lowerCamelCase : Union[str, Any]=0 ):
'''simple docstring'''
a = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
a = np.random.RandomState(__lowerCamelCase ).standard_normal((1, 4, 64, 64) )
a = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase ,dtype=__lowerCamelCase )
a = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
a = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
a = self.get_inputs(__lowerCamelCase )
a = pipe(**__lowerCamelCase ).images
a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
a = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 330 |
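The tests above pin determinism by seeding a torch.Generator per device, with a CPU fallback for mps; a minimal sketch of that pattern:

import torch

def make_generator(device: str, seed: int) -> torch.Generator:
    # mps historically lacked per-device Generator support, hence the fallback
    if device.startswith("mps"):
        return torch.manual_seed(seed)  # returns the (reseeded) default generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", 0)
print(torch.randn(2, generator=gen))  # identical values on every run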
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[Any] = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
UpperCamelCase__ : Union[str, Any] = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
UpperCamelCase__ : str = {
"""jukebox""": 512,
}
class lowerCamelCase_ ( PreTrainedTokenizer ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,):
'''simple docstring'''
a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token
super().__init__(
unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,)
a = version
a = max_n_lyric_tokens
a = n_genres
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle:
a = json.load(__lowerCamelCase )
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle:
a = json.load(__lowerCamelCase )
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle:
a = json.load(__lowerCamelCase )
a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'''
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
a = oov.replace(r'''\-\'''' ,r'''\-+\'''' )
a = regex.compile(__lowerCamelCase )
a = {v: k for k, v in self.artists_encoder.items()}
a = {v: k for k, v in self.genres_encoder.items()}
a = {v: k for k, v in self.lyrics_encoder.items()}
@property
def vocab_size( self : str ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )
def _convert_token_to_id( self ,list_artists ,list_genres ,list_lyrics ):
'''simple docstring'''
a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists]
for genres in range(len(__lowerCamelCase ) ):
a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]]
a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _tokenize( self : Tuple ,__lowerCamelCase : List[str] ):
'''simple docstring'''
return list(__lowerCamelCase )
def tokenize( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
a , a , a = self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
a = self._tokenize(__lowerCamelCase )
return artist, genre, lyrics
def prepare_for_tokenization( self ,artists: str ,genres: str ,lyrics: str ,is_split_into_words: bool = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
a = artists[idx].lower()
a = [genres[idx].lower()]
else:
a = self._normalize(artists[idx] ) + '''.v2'''
a = [
self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )}
a = 0
a = len(__lowerCamelCase ) + 1
a = self.vocab
a = {v: k for k, v in self.vocab.items()}
a = ''''''
else:
a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )
a = self._run_strip_accents(__lowerCamelCase )
a = lyrics.replace('''\\''' ,'''\n''' )
a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], []
return artists, genres, lyrics
def _run_strip_accents( self ,text ):
'''simple docstring'''
a = unicodedata.normalize('''NFD''' ,__lowerCamelCase )
a = []
for char in text:
a = unicodedata.category(__lowerCamelCase )
if cat == "Mn":
continue
output.append(__lowerCamelCase )
return "".join(__lowerCamelCase )
def _normalize( self ,text: str ):
'''simple docstring'''
a = (
[chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )]
+ [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )]
+ [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )]
+ ['''.''']
)
a = frozenset(__lowerCamelCase )
a = re.compile(r'''_+''' )
a = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' )
return text
def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ):
'''simple docstring'''
return " ".join(__lowerCamelCase )
def convert_to_tensors( self ,inputs ,tensor_type: Optional[Union[str, TensorType]] = None ,prepend_batch_axis: bool = False ):
'''simple docstring'''
if not isinstance(__lowerCamelCase ,__lowerCamelCase ):
a = TensorType(__lowerCamelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
import tensorflow as tf
a = tf.constant
a = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
import torch
a = torch.tensor
a = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
import jax.numpy as jnp # noqa: F811
a = jnp.array
a = _is_jax
else:
a = np.asarray
a = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
a = [inputs]
if not is_tensor(__lowerCamelCase ):
a = as_tensor(__lowerCamelCase )
except: # noqa E722
raise ValueError(
'''Unable to create tensor, you should probably activate truncation and/or padding '''
'''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )
return inputs
def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ):
'''simple docstring'''
a = [0, 0, 0]
a = [artist] * len(self.version )
a = [genres] * len(self.version )
a , a , a = self.tokenize(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
a = [-INFINITY] * len(full_tokens[-1] )
a = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )
def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) )
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) )
a = os.path.join(
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) )
return (artists_file, genres_file, lyrics_file)
def _convert_id_to_token( self ,artists_index ,genres_index ,lyric_index ):
'''simple docstring'''
a = self.artists_decoder.get(__lowerCamelCase )
a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index]
a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index]
return artist, genres, lyrics
| 330 | 1 |
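The _normalize helper above lowercases, keeps only [a-z0-9.], turns everything else into underscores, then collapses runs of underscores; a minimal standalone sketch of that cleanup:

import re

def normalize(text: str) -> str:
    accepted = set("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return re.sub(r"_+", "_", text).strip("_")

print(normalize("The Rolling Stones!"))  # the_rolling_stones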
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_a = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_a = get_tests_dir('fixtures/vocab.json')
_a = get_tests_dir('fixtures')
class A_ (unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 0
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Dict = WavaVecaConfig()
UpperCAmelCase_ : Union[str, Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
copyfile(lowercase_ , os.path.join(lowercase_ , "vocab.json" ) )
UpperCAmelCase_ : Dict = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : str = WavaVecaFeatureExtractor()
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
UpperCAmelCase_ : List[str] = WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowercase_ , lowercase_ ) , "r" ) as f:
UpperCAmelCase_ : Optional[int] = json.load(lowercase_ )
config_dict.pop("processor_class" )
with open(os.path.join(lowercase_ , lowercase_ ) , "w" ) as f:
f.write(json.dumps(lowercase_ ) )
UpperCAmelCase_ : List[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : str = WavaVecaFeatureExtractor()
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
UpperCAmelCase_ : List[Any] = WavaVecaProcessor(lowercase_ , lowercase_ )
# save in new folder
processor.save_pretrained(lowercase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowercase_ , lowercase_ ) , "r" ) as f:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
config_dict.pop("processor_class" )
with open(os.path.join(lowercase_ , lowercase_ ) , "w" ) as f:
f.write(json.dumps(lowercase_ ) )
UpperCAmelCase_ : List[Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Union[str, Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(lowercase_ )
# copy relevant files
copyfile(lowercase_ , os.path.join(lowercase_ , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(lowercase_ , lowercase_ ) , "w" ) as f:
f.write("{}" )
UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : Dict = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowercase_ )
UpperCAmelCase_ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowercase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
UpperCAmelCase_ : List[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
UpperCAmelCase_ : Dict = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowercase_ , use_fast=lowercase_ )
UpperCAmelCase_ : Dict = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase_ ):
AutoProcessor.register(lowercase_ , lowercase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ : Tuple = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Any = os.path.join(lowercase_ , "vocab.txt" )
with open(lowercase_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Union[str, Any] = CustomTokenizer(lowercase_ )
UpperCAmelCase_ : Any = CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = AutoProcessor.from_pretrained(lowercase_ )
self.assertIsInstance(lowercase_ , lowercase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ):
"""simple docstring"""
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = False
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = """AutoFeatureExtractor"""
SCREAMING_SNAKE_CASE__ : List[str] = """AutoTokenizer"""
SCREAMING_SNAKE_CASE__ : Any = False
try:
AutoConfig.register("custom" , lowercase_ )
AutoFeatureExtractor.register(lowercase_ , lowercase_ )
AutoTokenizer.register(lowercase_ , slow_tokenizer_class=lowercase_ )
AutoProcessor.register(lowercase_ , lowercase_ )
# If remote code is not set, the default is to use local classes.
UpperCAmelCase_ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
UpperCAmelCase_ : Any = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowercase_ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class A_ (unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , "test-processor" ) , push_to_hub=lowercase_ , use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = WavaVecaProcessor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowercase_ , "test-processor-org" ) , push_to_hub=lowercase_ , use_auth_token=self._token , organization="valid_org" , )
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowercase_ , getattr(new_processor.feature_extractor , lowercase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
UpperCAmelCase_ : Union[str, Any] = CustomFeatureExtractor.from_pretrained(lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = os.path.join(lowercase_ , "vocab.txt" )
with open(lowercase_ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCAmelCase_ : Union[str, Any] = CustomTokenizer(lowercase_ )
UpperCAmelCase_ : Any = CustomProcessor(lowercase_ , lowercase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
UpperCAmelCase_ : Optional[Any] = Repository(lowercase_ , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(lowercase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowercase_ , "tokenizer_config.json" ) ) as f:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowercase_ , "custom_processing.py" ) ) )
repo.push_to_hub()
UpperCAmelCase_ : List[Any] = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 61 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A_ :
'''simple docstring'''
pass
| 61 | 1 |
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys
    if len(sys.argv) == 1:
        print(solution(2_0))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("""Invalid entry - please enter a number.""")
| 349 |
'''simple docstring'''
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( TestCasePlus ):
"""simple docstring"""
@slow
@require_torch
def _snake_case ( self ) -> Union[str, Any]:
_UpperCAmelCase : Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" ,"""prajjwal1/bert-tiny""" )
_UpperCAmelCase : List[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase : List[Any] = bertabert.config.encoder.vocab_size
_UpperCAmelCase : Optional[int] = tokenizer.sep_token_id
_UpperCAmelCase : Union[str, Any] = tokenizer.cls_token_id
_UpperCAmelCase : str = 128
_UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""train[:1%]""" )
_UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" ,"""3.0.0""" ,split="""validation[:1%]""" )
_UpperCAmelCase : Any = train_dataset.select(range(32 ) )
_UpperCAmelCase : Any = val_dataset.select(range(16 ) )
_UpperCAmelCase : List[Any] = 4
def _map_to_encoder_decoder_inputs(a_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
_UpperCAmelCase : int = tokenizer(batch["""article"""] ,padding="""max_length""" ,truncation=a_ ,max_length=512 )
_UpperCAmelCase : Tuple = tokenizer(batch["""highlights"""] ,padding="""max_length""" ,truncation=a_ ,max_length=128 )
_UpperCAmelCase : int = inputs.input_ids
_UpperCAmelCase : Union[str, Any] = inputs.attention_mask
_UpperCAmelCase : Union[str, Any] = outputs.input_ids
_UpperCAmelCase : Dict = outputs.input_ids.copy()
_UpperCAmelCase : Dict = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
_UpperCAmelCase : Optional[int] = outputs.attention_mask
assert all(len(a_ ) == 512 for x in inputs.input_ids )
assert all(len(a_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(a_ ):
_UpperCAmelCase : Optional[int] = pred.label_ids
_UpperCAmelCase : Optional[int] = pred.predictions
# all unnecessary tokens are removed
_UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : str = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ )
_UpperCAmelCase : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(a_ ) )] ) / len(a_ )
return {"accuracy": accuracy}
# map train dataset
_UpperCAmelCase : Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
train_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
# same for validation dataset
_UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs ,batched=a_ ,batch_size=a_ ,remove_columns=["""article""", """highlights"""] ,)
val_dataset.set_format(
type="""torch""" ,columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] ,)
_UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : List[str] = SeqaSeqTrainingArguments(
output_dir=a_ ,per_device_train_batch_size=a_ ,per_device_eval_batch_size=a_ ,predict_with_generate=a_ ,evaluation_strategy="""steps""" ,do_train=a_ ,do_eval=a_ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,)
# instantiate trainer
_UpperCAmelCase : int = SeqaSeqTrainer(
model=a_ ,args=a_ ,compute_metrics=_compute_metrics ,train_dataset=a_ ,eval_dataset=a_ ,tokenizer=a_ ,)
# start training
trainer.train()
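
A hedged follow-up, not part of the original test: once training finishes, Seq2SeqTrainer.evaluate() runs generation over the eval set (because predict_with_generate=True above) and reports the custom metric with an eval_ prefix:

# Sketch only: evaluate() returns a dict such as {"eval_accuracy": ..., "eval_loss": ...}
metrics = trainer.evaluate()
print(metrics["eval_accuracy"])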
| 349 | 1 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int):
    index = list(range(len(weight)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value = 0
    fractions = [0] * len(index)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
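
A short usage sketch for the greedy routine above (my addition; the item values are illustrative): with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the first two items are taken whole and two thirds of the third, for a total of 240:

value = [60, 100, 120]
weight = [10, 20, 30]
max_value, fractions = fractional_knapsack(value, weight, 50)
assert max_value == 240.0 and fractions == [1, 1, 2 / 3]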
| 34 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class _a ( __a , unittest.TestCase ):
__a : Optional[Any] = PerceiverTokenizer
__a : str = False
def A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def perceiver_tokenizer( self : Optional[int] ):
'''simple docstring'''
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def A ( self : Union[str, Any] , **lowercase : int ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def A ( self : Tuple , lowercase : str , lowercase : List[str]=False , lowercase : Union[str, Any]=20 , lowercase : Union[str, Any]=5 ):
'''simple docstring'''
UpperCAmelCase = []
for i in range(len(lowercase ) ):
try:
UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCAmelCase = list(filter(lambda lowercase : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , lowercase ) )
UpperCAmelCase = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
UpperCAmelCase = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
UpperCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
UpperCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
UpperCAmelCase = ''' ''' + output_txt
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = '''Unicode €.'''
UpperCAmelCase = tokenizer(lowercase )
UpperCAmelCase = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , lowercase )
# decoding
UpperCAmelCase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '''[CLS]Unicode €.[SEP]''' )
UpperCAmelCase = tokenizer('''e è é ê ë''' )
UpperCAmelCase = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , lowercase )
# decoding
UpperCAmelCase = tokenizer.decode(lowercase )
self.assertEqual(lowercase , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
UpperCAmelCase = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
UpperCAmelCase = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A ( self : str ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowercase )
self.assertIn('''attention_mask''' , lowercase )
self.assertNotIn('''decoder_input_ids''' , lowercase )
self.assertNotIn('''decoder_attention_mask''' , lowercase )
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
UpperCAmelCase = [
'''Summary of the text.''',
'''Another summary.''',
]
UpperCAmelCase = tokenizer(
text_target=lowercase , max_length=32 , padding='''max_length''' , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def A ( self : int ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
UpperCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
UpperCAmelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase )
UpperCAmelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCAmelCase = json.load(lowercase )
with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
UpperCAmelCase = json.load(lowercase )
UpperCAmelCase = [f"<extra_id_{i}>" for i in range(125 )]
UpperCAmelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
UpperCAmelCase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowercase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowercase )]
UpperCAmelCase = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def A ( self : Union[str, Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def A ( self : Dict ):
'''simple docstring'''
pass
def A ( self : str ):
'''simple docstring'''
pass
def A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]''']
UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase )
self.assertIsInstance(lowercase , lowercase )
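
The expected ids in these tests follow from the tokenizer being byte-level: the text is UTF-8 encoded and every byte is shifted past the special tokens. A minimal sketch reproducing the fixture above — the offset of 6 and the [CLS]=4 / [SEP]=5 ids are inferred from the expected values in the test, not from documentation:

def perceiver_like_ids(text: str) -> list[int]:
    # Six special tokens occupy ids 0-5; raw UTF-8 bytes are shifted up by 6.
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]

assert perceiver_like_ids("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]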
| 34 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str) -> str:
    stem = fname.split(os.path.sep)[-1]
    return re.search(r'^(.*)_\d+\.jpg$', stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id
    def __len__(self):
        return len(self.file_names)
    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert('RGB')
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
lowerCamelCase__ : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCamelCase__ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ : Optional[int] = config['lr']
lowerCamelCase__ : Union[str, Any] = int(config['num_epochs'] )
lowerCamelCase__ : Any = int(config['seed'] )
lowerCamelCase__ : Union[str, Any] = int(config['batch_size'] )
lowerCamelCase__ : Any = config['image_size']
if not isinstance(_UpperCAmelCase , (list, tuple) ):
lowerCamelCase__ : Dict = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowerCamelCase__ : Dict = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCamelCase__ : Optional[int] = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
lowerCamelCase__ : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCamelCase__ : Optional[Any] = os.path.split(_UpperCAmelCase )[-1].split('.' )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Grab all the image filenames
lowerCamelCase__ : Dict = [os.path.join(args.data_dir , _UpperCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowerCamelCase__ : Union[str, Any] = [extract_label(_UpperCAmelCase ) for fname in file_names]
lowerCamelCase__ : Any = list(set(_UpperCAmelCase ) )
id_to_label.sort()
lowerCamelCase__ : Optional[Any] = {lbl: i for i, lbl in enumerate(_UpperCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_UpperCAmelCase )
torch.manual_seed(_UpperCAmelCase )
torch.cuda.manual_seed_all(_UpperCAmelCase )
# Split our filenames between train and validation
lowerCamelCase__ : Dict = np.random.permutation(len(_UpperCAmelCase ) )
lowerCamelCase__ : Tuple = int(0.8 * len(_UpperCAmelCase ) )
lowerCamelCase__ : Optional[int] = random_perm[:cut]
lowerCamelCase__ : Optional[Any] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCamelCase__ : List[str] = Compose([RandomResizedCrop(_UpperCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
lowerCamelCase__ : List[str] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# For evaluation, we use a deterministic Resize
lowerCamelCase__ : Dict = Compose([Resize(_UpperCAmelCase ), ToTensor()] )
lowerCamelCase__ : Optional[int] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# Instantiate dataloaders.
lowerCamelCase__ : List[Any] = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
lowerCamelCase__ : int = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : List[Any] = create_model('resnet50d' , pretrained=_UpperCAmelCase , num_classes=len(_UpperCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : str = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCamelCase__ : Union[str, Any] = False
for param in model.get_classifier().parameters():
lowerCamelCase__ : Tuple = True
# We normalize the batches of images to be a bit faster.
lowerCamelCase__ : Tuple = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowerCamelCase__ : Optional[Any] = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : Any = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCamelCase__ : Dict = OneCycleLR(optimizer=_UpperCAmelCase , max_lr=_UpperCAmelCase , epochs=_UpperCAmelCase , steps_per_epoch=len(_UpperCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ : Any = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase__ : List[str] = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase__ : Optional[int] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase__ : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCamelCase__ : Dict = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCamelCase__ : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCamelCase__ : Optional[int] = os.path.splitext(_UpperCAmelCase )[0]
if "epoch" in training_difference:
lowerCamelCase__ : Optional[int] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowerCamelCase__ : Any = None
else:
lowerCamelCase__ : Optional[int] = int(training_difference.replace('step_' , '' ) )
lowerCamelCase__ : int = resume_step // len(_UpperCAmelCase )
resume_step -= starting_epoch * len(_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
if args.with_tracking:
lowerCamelCase__ : Optional[int] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCamelCase__ : Optional[int] = accelerator.skip_first_batches(_UpperCAmelCase , _UpperCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCamelCase__ : str = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : Tuple = (batch['image'] - mean) / std
lowerCamelCase__ : Union[str, Any] = model(_UpperCAmelCase )
lowerCamelCase__ : Any = torch.nn.functional.cross_entropy(_UpperCAmelCase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Dict = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCamelCase__ : List[Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
model.eval()
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : List[str] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase__ : List[str] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase__ : List[str] = (batch['image'] - mean) / std
with torch.no_grad():
lowerCamelCase__ : Any = model(_UpperCAmelCase )
lowerCamelCase__ : List[Any] = outputs.argmax(dim=-1 )
lowerCamelCase__ : int = accelerator.gather_for_metrics((predictions, batch['label']) )
lowerCamelCase__ : Optional[int] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCamelCase__ : Any = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(_UpperCAmelCase ),
'epoch': epoch,
} , step=_UpperCAmelCase , )
if checkpointing_steps == "epoch":
lowerCamelCase__ : Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
lowerCamelCase__ : Optional[Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument('--data_dir', required=True, help='The data folder on disk.')
    parser.add_argument('--fp16', action='store_true', help='If passed, will use FP16 training.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    parser.add_argument(
        '--checkpointing_steps', type=str, default=None, help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.', )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.', )
    parser.add_argument(
        '--with_tracking', action='store_true', help='Whether to load in all available experiment trackers from the environment and use them for logging.', )
    parser.add_argument(
        '--project_dir', type=str, default='logs', help='Location on where to store experiment tracking logs and relevant project information', )
    args = parser.parse_args()
    config = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
    training_function(config, args)
if __name__ == "__main__":
main()
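
This script follows the Accelerate example layout, so it is normally started through the CLI, e.g. `accelerate launch cv_example.py --data_dir ./images` (the file name is hypothetical). A hedged alternative for notebooks is the launcher API, assuming config and args are built as in main():

from accelerate import notebook_launcher

# Sketch only: runs training_function on e.g. 2 processes without the CLI.
notebook_launcher(training_function, args=(config, args), num_processes=2)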
| 364 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
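
To make the failure function concrete, a small traced example (my addition): for "ABABX", the length of the longest proper prefix that is also a suffix ending at each index is 0, 0, 1, 2, 0 — e.g. at index 3 the suffix "AB" matches the prefix "AB", so a mismatch there lets the search resume two characters into the pattern:

assert get_failure_array("ABABX") == [0, 0, 1, 2, 0]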
| 45 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
super().__init__()
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
speech_model=snake_case , speech_processor=snake_case , vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , feature_extractor=snake_case , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case = "auto" ):
if slice_size == "auto":
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def __call__( self , snake_case , snake_case=1_6000 , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
lowercase = self.speech_processor.feature_extractor(
snake_case , return_tensors='pt' , sampling_rate=snake_case ).input_features.to(self.device )
lowercase = self.speech_model.generate(snake_case , max_length=48_0000 )
lowercase = self.speech_processor.tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , normalize=snake_case )[
0
]
if isinstance(snake_case , snake_case ):
lowercase = 1
elif isinstance(snake_case , snake_case ):
lowercase = len(snake_case )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(snake_case )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(snake_case , snake_case ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(snake_case )}.''' )
# get prompt text embeddings
lowercase = self.tokenizer(
snake_case , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase = text_embeddings.shape
lowercase = text_embeddings.repeat(1 , snake_case , 1 )
lowercase = text_embeddings.view(bs_embed * num_images_per_prompt , snake_case , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase = 42
if negative_prompt is None:
lowercase = [''] * batch_size
elif type(snake_case ) is not type(snake_case ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(snake_case )} !='''
F''' {type(snake_case )}.''' )
elif isinstance(snake_case , snake_case ):
lowercase = [negative_prompt]
elif batch_size != len(snake_case ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(snake_case )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
' the batch size of `prompt`.' )
else:
lowercase = negative_prompt
lowercase = text_input_ids.shape[-1]
lowercase = self.tokenizer(
snake_case , padding='max_length' , max_length=snake_case , truncation=snake_case , return_tensors='pt' , )
lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase = uncond_embeddings.shape[1]
lowercase = uncond_embeddings.repeat(1 , snake_case , 1 )
lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt , snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase = torch.randn(snake_case , generator=snake_case , device='cpu' , dtype=snake_case ).to(
self.device )
else:
lowercase = torch.randn(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
lowercase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(snake_case )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase = {}
if accepts_eta:
lowercase = eta
for i, t in enumerate(self.progress_bar(snake_case ) ):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(snake_case , snake_case )
# predict the noise residual
lowercase = self.unet(snake_case , snake_case , encoder_hidden_states=snake_case ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2 )
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(snake_case , snake_case , snake_case , **snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(snake_case , snake_case , snake_case )
lowercase = 1 / 0.18_215 * latents
lowercase = self.vae.decode(snake_case ).sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(snake_case )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=snake_case , nsfw_content_detected=snake_case )
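
A hedged usage sketch for this community pipeline. The checkpoint ids, the custom_pipeline name and the dataset used for a sample clip are all assumptions for illustration; the first two positional arguments of __call__ above are the raw audio array and its sampling rate:

import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

# Hypothetical model ids; the Whisper components transcribe the clip into the prompt.
speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
    speech_model=speech_model,
    speech_processor=speech_processor,
    torch_dtype=torch.float16,
).to("cuda")
sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]["audio"]
image = pipe(sample["array"], sample["sampling_rate"]).images[0]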
| 195 |
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
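
A worked example instead of interactive input (my addition; the graph is illustrative): a four-vertex graph shaped like a square with one diagonal. The minimum spanning tree here is unique, and with this implementation the edges come out in the order the vertices are attached, for a total weight of 4:

graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 4), (1, 3, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
assert prisms_algorithm(graph) == [(0, 1), (1, 2), (2, 3)]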
| 195 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
A__ = ViTImageProcessor if is_vision_available() else None
@property
def A_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Any = (3, 32, 128)
__snake_case : Tuple = tempfile.mkdtemp()
# fmt: off
__snake_case : Optional[int] = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__snake_case : Optional[int] = dict(zip(__a , range(len(__a ) ) ) )
__snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
__snake_case : Union[str, Any] = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 32, 'width': 128},
}
__snake_case : Dict = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__a , __a )
def A_ ( self : str , **__a : Any ) -> str:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] , **__a : Any ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A_ ( self : int ) -> Dict:
'''simple docstring'''
__snake_case : List[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__snake_case : Dict = Image.fromarray(np.moveaxis(__a , 0 , -1 ) )
return image_input
def A_ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Dict = self.get_tokenizer()
__snake_case : List[str] = self.get_image_processor()
__snake_case : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : Union[str, Any] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : str = self.get_tokenizer()
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__snake_case : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case : str = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__snake_case : Any = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def A_ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Optional[Any] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Optional[Any] = self.prepare_image_inputs()
__snake_case : Any = image_processor(__a , return_tensors='np' )
__snake_case : Optional[int] = processor(images=__a , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : List[Any] = self.get_tokenizer()
__snake_case : int = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Tuple = 'test'
__snake_case : Tuple = processor(text=__a )
__snake_case : Tuple = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self : int ) -> Dict:
'''simple docstring'''
__snake_case : Dict = self.get_image_processor()
__snake_case : Optional[int] = self.get_tokenizer()
__snake_case : Optional[Any] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Dict = 'test'
__snake_case : Tuple = self.prepare_image_inputs()
__snake_case : Optional[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def A_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : str = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Tuple = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : List[Any] = processor.char_decode(__a )
__snake_case : Any = tokenizer.batch_decode(__a )
__snake_case : List[Any] = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(__a , __a )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : List[str] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Union[str, Any] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__snake_case : Optional[int] = None
__snake_case : List[Any] = self.prepare_image_inputs()
__snake_case : Optional[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Optional[Any] = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Optional[int] = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__snake_case : str = torch.randn(1 , 27 , 38 )
__snake_case : int = torch.randn(1 , 27 , 50257 )
__snake_case : Optional[int] = torch.randn(1 , 27 , 30522 )
__snake_case : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
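# Side note (sketch, not part of the test): the three logit widths above match
# the vocabularies MGP-STR fuses -- 38 = the 36 alphanumeric characters plus
# the [GO]/[s] specials, 50257 = GPT-2's BPE vocab, 30522 = BERT's WordPiece
# vocab. A minimal shape check, assuming only torch:
import torch
char_ids = torch.randn(1, 27, 38).argmax(-1)  # one character id per position
print(char_ids.shape)  # torch.Size([1, 27])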
| 0 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # Round the float cube root before cubing: without rounding, exact cubes
    # such as 27 are missed (27 ** (1 / 3) == 3.0000000000000004 in floats).
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
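# A float-free alternative (sketch): binary-search the integer cube root,
# which avoids floating-point rounding concerns for arbitrarily large n >= 0.
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        return False
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid ** 3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False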
| 0 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__A : Optional[int] = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " "):
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict):
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast):
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCamelCase_ ( A__ : "RagExampleArguments" , A__ : "ProcessingArguments" , A__ : "IndexHnswArguments" , ):
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase_ : List[Any] = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase_ : Any = dataset.map(snake_case_ , batched=snake_case_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase_ : Any = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=snake_case_ )
lowerCAmelCase_ : Optional[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase_ : Optional[int] = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase_ : Union[str, Any] = dataset.map(
partial(snake_case_ , ctx_encoder=snake_case_ , ctx_tokenizer=snake_case_ ) , batched=snake_case_ , batch_size=processing_args.batch_size , features=snake_case_ , )
# And finally save your dataset
lowerCAmelCase_ : Any = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(snake_case_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase_ : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=snake_case_ )
# And save the index
lowerCAmelCase_ : Dict = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(snake_case_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv') ,metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} ,)
lowercase = field(
default=None ,metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} ,)
lowercase = field(
default='facebook/rag-sequence-nq' ,metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} ,)
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' ,metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} ,)
lowercase = field(
default=str(Path(__file__).parent / 'test_run' / 'dummy-kb') ,metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} ,)
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
default=None ,metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} ,)
lowercase = field(
default=16 ,metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} ,)
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
default=7_68 ,metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} ,)
lowercase = field(
default=1_28 ,metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
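# Standalone sketch (illustrative sizes, not part of the script): querying a
# faiss.IndexHNSWFlat like the one built above; 768 is the DPR hidden size and
# random vectors stand in for real passage embeddings.
import faiss
import numpy as np
d = 768
index = faiss.IndexHNSWFlat(d, 128, faiss.METRIC_INNER_PRODUCT)
passages = np.random.rand(1000, d).astype("float32")
index.add(passages)
scores, ids = index.search(passages[:1], 5)  # top-5 neighbours of one query
print(ids[0])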
| 120 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b'''Hello server!''')
    with open('''Received_file''', '''wb''') as out_file:
        print('''File opened''')
        print('''Receiving data...''')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print('''Successfully received the file''')
    sock.close()
    print('''Connection closed''')
if __name__ == "__main__":
    main()
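# A matching server sketch (assumed counterpart, not shown in the source):
# reads the client's greeting, then streams a local file back in 1 KiB chunks.
# The filename and port are illustrative.
def serve(filename: str = "file_to_send", port: int = 12312) -> None:
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((socket.gethostname(), port))
    srv.listen(1)
    conn, _addr = srv.accept()
    print(conn.recv(1024))  # b"Hello server!"
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    srv.close()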
| 24 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : Tuple = mock.Mock()
snake_case__ : str = 5_0_0
snake_case__ : Optional[Any] = {}
snake_case__ : int = HTTPError
snake_case__ : Dict = {}
# Download this model to make sure it's in the cache.
snake_case__ : Dict = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
snake_case__ : List[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check verifies that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _lowercase ( self : List[Any] ):
# A mock response for an HTTP head request to emulate server down
snake_case__ : List[Any] = mock.Mock()
snake_case__ : Any = 5_0_0
snake_case__ : List[str] = {}
snake_case__ : int = HTTPError
snake_case__ : List[Any] = {}
# Download this model to make sure it's in the cache.
snake_case__ : Optional[Any] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=__A ) as mock_head:
snake_case__ : Any = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check verifies that we did call the fake head request
mock_head.assert_called()
def _lowercase ( self : str ):
# This test is for deprecated behavior and can be removed in v5
try:
snake_case__ : List[str] = tempfile.mktemp()
with open(__A , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , __A )
snake_case__ : int = AlbertTokenizer.from_pretrained(__A )
finally:
os.remove(__A )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , __A )
snake_case__ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def _lowercase ( self : Union[str, Any] ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : Optional[int] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _lowercase ( cls : Tuple ):
snake_case__ : int = TOKEN
HfFolder.save_token(__A )
@classmethod
def _lowercase ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def _lowercase ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : str = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Optional[Any] = BertTokenizer(__A )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
snake_case__ : List[Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__A , repo_id="test-tokenizer" , push_to_hub=__A , use_auth_token=self._token )
snake_case__ : List[Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _lowercase ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Union[str, Any] = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Tuple = BertTokenizer(__A )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
snake_case__ : Optional[Any] = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__A , repo_id="valid_org/test-tokenizer-org" , push_to_hub=__A , use_auth_token=self._token )
snake_case__ : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _lowercase ( self : Tuple ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : List[Any] = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : int = CustomTokenizer(__A )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Union[str, Any] = os.path.join(__A , "vocab.txt" )
with open(__A , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case__ : Optional[Any] = BertTokenizerFast.from_pretrained(__A )
bert_tokenizer.save_pretrained(__A )
snake_case__ : Dict = CustomTokenizerFast.from_pretrained(__A )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case__ : Dict = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case__ : str = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__A , trust_remote_code=__A )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def _lowercase ( self : List[Any] ):
snake_case__ : int = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : str = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def _lowercase ( self : Any ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case__ : Tuple = Trie()
snake_case__ : List[Any] = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__A , ["AB", "C"] )
| 286 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
"""simple docstring"""
a_ = ["image_processor", "tokenizer"]
a_ = "ViltImageProcessor"
a_ = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = self.tokenizer.model_input_names
snake_case__ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def _lowercase ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
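# Hypothetical usage sketch (the checkpoint name and image path are
# assumptions, not taken from this file): one call tokenizes the text and
# merges in pixel_values/pixel_mask from the image processor.
from PIL import Image
from transformers import ViltProcessor
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
inputs = processor(images=Image.open("cat.jpg"), text="How many cats?", return_tensors="pt")
print(list(inputs.keys()))  # input_ids, ..., pixel_values, pixel_mask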
| 286 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
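# Generic sketch of the deferred-import idea behind _LazyModule, using only
# module-level __getattr__ (PEP 562) from the standard library; the mapping
# below is illustrative.
import importlib
_imports = {"math": ["sqrt"], "json": ["dumps"]}
def __getattr__(name):
    for module_name, names in _imports.items():
        if name in names:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)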
| 116 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_A : List[Any] ='''pt'''
elif is_tf_available():
_A : Any ='''tf'''
else:
_A : List[str] ='''jax'''
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
a = ByTaTokenizer
a = False
def lowerCamelCase_ ( self: str ):
super().setUp()
lowerCamelCase__ : str = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self: Optional[int] ):
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def lowerCamelCase_ ( self: Any , **UpperCamelCase__: Tuple ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def lowerCamelCase_ ( self: Union[str, Any] , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCamelCase__ : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
try:
lowerCamelCase__ : Tuple = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCamelCase__ : Union[str, Any] = list(filter(lambda UpperCamelCase__ : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase__ ) )
lowerCamelCase__ : Tuple = list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
lowerCamelCase__ : Dict = toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
lowerCamelCase__ : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCamelCase__ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
lowerCamelCase__ : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
lowerCamelCase__ : str = """ """ + output_txt
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : Union[str, Any] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
lowerCamelCase__ : Optional[int] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = self.ta_base_tokenizer
lowerCamelCase__ : Dict = """Unicode €."""
lowerCamelCase__ : List[Any] = tokenizer(UpperCamelCase__ )
lowerCamelCase__ : List[str] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : Tuple = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """Unicode €.</s>""" )
lowerCamelCase__ : List[Any] = tokenizer("""e è é ê ë""" )
lowerCamelCase__ : List[Any] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , UpperCamelCase__ )
# decoding
lowerCamelCase__ : str = tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : int = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCamelCase__ : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowerCamelCase__ : int = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
lowerCamelCase__ : Any = list(batch.input_ids.numpy()[0] )
else:
lowerCamelCase__ : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def lowerCamelCase_ ( self: List[Any] ):
lowerCamelCase__ : List[str] = self.ta_base_tokenizer
lowerCamelCase__ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCamelCase__ : Tuple = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertNotIn("""decoder_input_ids""" , UpperCamelCase__ )
self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase__ )
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = self.ta_base_tokenizer
lowerCamelCase__ : List[Any] = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCamelCase__ : Union[str, Any] = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ : Tuple = self.ta_base_tokenizer
lowerCamelCase__ : str = ["""A long paragraph for summarization. </s>"""]
lowerCamelCase__ : Optional[Any] = ["""Summary of the text. </s>"""]
# fmt: off
lowerCamelCase__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowerCamelCase__ : Any = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowerCamelCase__ : Any = tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch["""input_ids"""][0] )
self.assertEqual(UpperCamelCase__ , batch["""labels"""][0] )
def lowerCamelCase_ ( self: Optional[int] ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : int = tempfile.mkdtemp()
lowerCamelCase__ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
lowerCamelCase__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase__ : Any = tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
lowerCamelCase__ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCamelCase__ : List[Any] = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : int = after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase__ : Any = tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCamelCase__ : Optional[Any] = json.load(UpperCamelCase__ )
lowerCamelCase__ : Optional[int] = [F'''<extra_id_{i}>''' for i in range(125 )]
lowerCamelCase__ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(UpperCamelCase__ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase__ : Optional[Any] = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase__ )]
lowerCamelCase__ : Any = tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
lowerCamelCase__ : str = tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: str ):
pass
def lowerCamelCase_ ( self: List[str] ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: int ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
lowerCamelCase__ : Dict = self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : Dict = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
lowerCamelCase__ : Optional[int] = tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowerCamelCase__ : str = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCamelCase__ : str = 0
lowerCamelCase__ : Any = tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + """_id""" , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + """_id""" ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [] )
setattr(UpperCamelCase__ , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
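# Sketch of the byte-to-id mapping the expected ids above imply: every UTF-8
# byte maps to byte_value + 3 (ids 0-2 are reserved for pad/eos/unk) and </s>
# is appended as id 1; checked against the "Unicode €." example.
def byta_encode(text: str) -> list:
    return [b + 3 for b in text.encode("utf-8")] + [1]
assert byta_encode("Unicode €.") == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]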
| 41 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover greedily from a maximal matching."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge incident to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return the set of edges of ``graph`` as (from_node, to_node) pairs."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 368 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
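# Cross-check sketch using str.find -- same positions and the same O(n*m)
# worst case as the naive scan above, but with the inner comparison in C:
def find_all(s: str, pattern: str) -> list:
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)
    return positions
assert find_all("ABCDEFG", "DE") == naive_pattern_search("ABCDEFG", "DE") == [3]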
| 147 | 0 |
"""simple docstring"""
from collections import deque
class _lowerCAmelCase :
    def __init__(self, process_name, arrival_time, burst_time) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class _lowerCAmelCase :
    def __init__(self, number_of_queues, time_slices, queue, current_time) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
def lowerCamelCase ( self ) -> list[str]:
'''simple docstring'''
snake_case : List[str] = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
snake_case : Union[str, Any] = []
for i in range(len(UpperCamelCase__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
snake_case : Optional[Any] = []
for i in range(len(UpperCamelCase__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
snake_case : List[Any] = []
for i in range(len(UpperCamelCase__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
return [q.burst_time for q in queue]
def lowerCamelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCamelCase ( self , UpperCamelCase__ ) -> deque[Process]:
'''simple docstring'''
snake_case : deque[Process] = deque() # sequence deque of finished process
while len(UpperCamelCase__ ) != 0:
snake_case : Union[str, Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCamelCase__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
snake_case : List[str] = 0
# set the process's turnaround time because it is finished
snake_case : Tuple = self.current_time - cp.arrival_time
# set the completion time
snake_case : Dict = self.current_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase__ )
self.finish_queue.extend(UpperCamelCase__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCamelCase ( self , ready_queue , time_slice ) -> tuple[deque[Process], deque[Process]]:
'''simple docstring'''
snake_case : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(UpperCamelCase__ ) ):
snake_case : Optional[int] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCamelCase__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
snake_case : Union[str, Any] = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCamelCase__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
snake_case : Union[str, Any] = 0
# set the finish time
snake_case : Tuple = self.current_time
# update the process' turnaround time because it is finished
snake_case : List[Any] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase__ )
self.finish_queue.extend(UpperCamelCase__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCamelCase ( self ) -> deque[Process]:
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
snake_case ,snake_case : Union[str, Any] = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__snake_case = Process("""P1""", 0, 53)
__snake_case = Process("""P2""", 0, 17)
__snake_case = Process("""P3""", 0, 68)
__snake_case = Process("""P4""", 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
__snake_case = Process("""P1""", 0, 53)
__snake_case = Process("""P2""", 0, 17)
__snake_case = Process("""P3""", 0, 68)
__snake_case = Process("""P4""", 0, 24)
__snake_case = 3
__snake_case = [17, 25]
__snake_case = deque([Pa, Pa, Pa, Pa])
__snake_case = MLFQ(number_of_queues, time_slices, queue, 0)
__snake_case = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
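# Minimal round-robin sketch, independent of the class above: one pass over a
# deque with a fixed time slice, re-queueing unfinished jobs. The burst times
# and the 17-tick slice mirror the example processes.
def round_robin_once(jobs: deque, time_slice: int) -> list:
    finished = []
    for _ in range(len(jobs)):
        name, remaining = jobs.popleft()
        if remaining > time_slice:
            jobs.append((name, remaining - time_slice))  # not done: requeue
        else:
            finished.append(name)
    return finished
jobs = deque([("P1", 53), ("P2", 17), ("P3", 68), ("P4", 24)])
print(round_robin_once(jobs, 17))  # ['P2']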
| 203 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
__snake_case = {
"""User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def __lowerCAmelCase ( lowercase : str = "dhaka" , lowercase : int = 5 ) -> int:
"""simple docstring"""
snake_case : List[Any] = min(lowercase , 50 ) # Prevent abuse!
snake_case : Optional[Any] = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
snake_case : str = requests.get("https://www.google.com/search" , params=lowercase , headers=lowercase )
snake_case : List[str] = BeautifulSoup(html.text , "html.parser" )
snake_case : List[Any] = "".join(
re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
snake_case : Optional[Any] = json.dumps(lowercase )
snake_case : str = json.loads(lowercase )
snake_case : List[str] = re.findall(
R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , lowercase , )
if not matched_google_image_data:
return 0
snake_case : List[str] = re.sub(
R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(lowercase ) , )
snake_case : Dict = re.findall(
R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , lowercase , )
for index, fixed_full_res_image in enumerate(lowercase ):
if index >= max_images:
return index
snake_case : List[str] = bytes(lowercase , "ascii" ).decode(
"unicode-escape" )
snake_case : Dict = bytes(lowercase , "ascii" ).decode(
"unicode-escape" )
snake_case : int = urllib.request.build_opener()
snake_case : int = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(lowercase )
snake_case : Optional[int] = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
urllib.request.urlretrieve( # noqa: S310
lowercase , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
__snake_case = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print("""Please provide a search term.""")
raise
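# Sketch of the ascii/unicode-escape round trip used above to de-escape image
# URLs that the page embeds as \uXXXX sequences inside its JSON payload:
escaped = "https://example.com/\\u0069mg.jpg"  # contains a literal backslash-u
print(bytes(escaped, "ascii").decode("unicode-escape"))  # .../img.jpg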
| 203 | 1 |
'''simple docstring'''
import functools
from typing import Any
def word_break(string: str, words: list) -> bool:
    """Return True if ``string`` can be segmented into words from ``words``."""
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words):
        raise ValueError('the words should be a list of non-empty strings')
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
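# Usage sketch for the word-break check above, with the classic examples:
print(word_break("applepenapple", ["apple", "pen"]))  # True
print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False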
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1) -> str:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0) -> list:
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
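# Standalone sketch of the qkv split performed above: a fused attention weight
# of shape (3 * channels, ...) is reshaped per head, then split into equal
# query/key/value thirds along dim 1; the sizes here are illustrative.
import torch
num_heads, channels = 4, 12
fused = torch.randn(3 * channels, 8)  # stand-in fused qkv weight
per_head = fused.reshape(num_heads, 3 * channels // num_heads, 8)
q, k, v = per_head.split(channels // num_heads, dim=1)  # each (4, 3, 8)
print(q.reshape(-1, 8).shape)  # torch.Size([12, 8])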
def convert_ldm_checkpoint(checkpoint, config):
_A: str = {}
_A: Any = checkpoint["""time_embed.0.weight"""]
_A: Union[str, Any] = checkpoint["""time_embed.0.bias"""]
_A: List[str] = checkpoint["""time_embed.2.weight"""]
_A: Union[str, Any] = checkpoint["""time_embed.2.bias"""]
_A: str = checkpoint["""input_blocks.0.0.weight"""]
_A: str = checkpoint["""input_blocks.0.0.bias"""]
_A: Optional[Any] = checkpoint["""out.0.weight"""]
_A: List[Any] = checkpoint["""out.0.bias"""]
_A: Tuple = checkpoint["""out.2.weight"""]
_A: int = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
_A: Optional[int] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
_A: Tuple = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(a )
}
# Retrieves the keys for the middle blocks only
_A: Dict = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
_A: List[str] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(a )
}
# Retrieves the keys for the output blocks only
_A: List[str] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
_A: Optional[int] = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(a )
}
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]

        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
if ["conv.weight", "conv.bias"] in output_block_list.values():
_A: List[Any] = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
_A: Union[str, Any] = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_A: Optional[int] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(a ) == 2:
_A: Optional[Any] = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
UpperCAmelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCAmelCase__ : Any = parser.parse_args()
UpperCAmelCase__ : Dict = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCAmelCase__ : List[Any] = json.loads(f.read())
UpperCAmelCase__ : int = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCAmelCase__ : Union[str, Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCAmelCase__ : Any = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
UpperCAmelCase__ : str = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
UpperCAmelCase__ : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 121 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCAmelCase = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_UpperCAmelCase = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_UpperCAmelCase = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
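

# A minimal usage sketch, mirroring the docstring example above:
#   squad_metric = datasets.load_metric("squad")
#   predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
#   references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
#   squad_metric.compute(predictions=predictions, references=references)  # {'exact_match': 100.0, 'f1': 100.0}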
| 173 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_LayoutLMv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
_snake_case = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
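        expected_words = _snake_case  # the list above kept its mangled name; bind it to a readable one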
_snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
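        expected_boxes = _snake_case  # likewise for the expected bounding boxes assigned just above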
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 351 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        """Collates the audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap the feature size so the extractor pads mel spectrograms correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
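

# A hedged usage sketch (the checkpoint name is one public example, not prescribed here):
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, world!", return_tensors="pt")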
| 160 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
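

# A typical invocation (script name and paths are placeholders, not from the source):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai/ckpt \
#       --pytorch_dump_folder_path /path/to/output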
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 101 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets

    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 102 | 0 |
"""simple docstring"""
def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: returns the maximum value that fits in `max_weight`,
    considering the items from `index` onwards.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
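
    # A small worked example (hypothetical values/weights, not from the source):
    # with max_weight=5 the best pick is the items of weight 1 and 4, worth 3 + 9 = 12.
    values = [3, 7, 2, 9]
    weights = [1, 2, 3, 4]
    print(knapsack(values, weights, number_of_items=4, max_weight=5, index=0))  # 12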
| 309 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns all the ways that `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
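

# For instance (illustrative): all_construct("abc", ["a", "b", "c", "ab"])
# returns [["ab", "c"], ["a", "b", "c"]].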
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 309 | 1 |
'''simple docstring'''
def selection_sort(collection: list) -> list:
    """Sorts `collection` in place in ascending order and returns it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
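
# A quick sanity check (illustrative): selection_sort([5, 2, 4, 1]) returns [1, 2, 4, 5];
# note that the sort happens in place on the list that is passed in.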
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 185 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None,
            feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler,
            safety_checker=None, feature_extractor=None, provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5,
            num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 185 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 365 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 49 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase )
__lowerCamelCase = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = self.get_dummy_inputs(__UpperCAmelCase )
__lowerCamelCase = sd_pipe(**__UpperCAmelCase ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
    sd_pipe = sd_pipe.to(torch_device)
    sd_pipe.set_progress_bar_config(disable=None)
    # forward without prompt embeds
    inputs = self.get_dummy_inputs(torch_device)
    negative_prompt = 3 * ["this is a negative prompt"]
    inputs["negative_prompt"] = negative_prompt
    inputs["prompt"] = 3 * [inputs["prompt"]]
    output = sd_pipe(**inputs)
    image_slice_1 = output.images[0, -3:, -3:, -1]
    # forward with prompt embeds
    inputs = self.get_dummy_inputs(torch_device)
    negative_prompt = 3 * ["this is a negative prompt"]
    prompt = 3 * [inputs.pop("prompt")]
    (
        prompt_embeds,
        negative_prompt_embeds,
        pooled_prompt_embeds,
        negative_pooled_prompt_embeds,
    ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
    output = sd_pipe(
        **inputs,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    )
    image_slice_2 = output.images[0, -3:, -3:, -1]
    # make sure that it's equal
    assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def tearDown(self):
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
    generator = torch.Generator(device=generator_device).manual_seed(seed)
    latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
    latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
    inputs = {
        "prompt": "a photograph of an astronaut riding a horse",
        "latents": latents,
        "generator": generator,
        "num_inference_steps": 3,
        "guidance_scale": 7.5,
        "output_type": "numpy",
    }
    return inputs
def test_stable_diffusion_default_ddim(self):
    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
    pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_inputs(torch_device)
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
    assert np.abs(image_slice - expected_slice).max() < 7e-3
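
# A minimal usage sketch for the four-way encode_prompt unpack exercised in the
# negative-prompt-embeds test above. `pipe` and `init_image` are placeholders
# assumed to be an already-loaded StableDiffusionXLImgaImgPipeline and a start
# image prepared by the caller; this mirrors the calls in the test, it is not
# additional test code.
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = pipe.encode_prompt("a painting of a squirrel", negative_prompt="low quality")
images = pipe(
    image=init_image,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
).images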
| 330 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dummy iterable dataset with a random, probabilistic length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class __lowerCAmelCase ( unittest.TestCase ):
def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
    batch_sampler_shards = [
        BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
        for i in range(2)
    ]
    batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
    if not split_batches:
        self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
    self.assertListEqual(batch_sampler_lists, expected)
def test_batch_sampler_shards_with_no_splits(self):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase )
def test_batch_sampler_shards_with_splits(self):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase )
def test_batch_sampler_shards_with_no_splits_no_even(self):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of total batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , even_batches=__UpperCAmelCase )
def test_batch_sampler_shards_with_splits_no_even(self):
'''simple docstring'''
# Check the shards when the dataset is a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCAmelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
# Check the shards when the dataset is very small.
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
__lowerCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCAmelCase )
__lowerCamelCase = [[], []]
self.check_batch_sampler_shards(__UpperCAmelCase , __UpperCAmelCase , split_batches=__UpperCAmelCase , even_batches=__UpperCAmelCase )
def test_batch_sampler_with_varying_batch_size(self):
    batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
    batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
    self.assertEqual(len(batch_sampler_shards[0]), 3)
    self.assertEqual(len(batch_sampler_shards[1]), 2)
    self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
    self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
    random.seed(seed)
    reference = list(dataset)
    iterable_dataset_shards = [
        IterableDatasetShard(
            dataset,
            batch_size=batch_size,
            drop_last=drop_last,
            num_processes=num_processes,
            process_index=i,
            split_batches=split_batches,
        )
        for i in range(num_processes)
    ]
    iterable_dataset_lists = []
    for iterable_dataset_shard in iterable_dataset_shards:
        # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
        random.seed(seed)
        iterable_dataset_lists.append(list(iterable_dataset_shard))
    shard_batch_size = batch_size // num_processes if split_batches else batch_size
    # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
    first_list = iterable_dataset_lists[0]
    for l in iterable_dataset_lists[1:]:
        self.assertEqual(len(l), len(first_list))
        self.assertTrue(len(l) % shard_batch_size == 0)
    observed = []
    for idx in range(0, len(first_list), shard_batch_size):
        for l in iterable_dataset_lists:
            observed += l[idx : idx + shard_batch_size]
    if not drop_last:
        while len(reference) < len(observed):
            reference += reference
    self.assertListEqual(observed, reference[: len(observed)])
def test_iterable_dataset_shard(self):
    seed = 42
    dataset = RandomIterableDataset()
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    # Edge case with a very small dataset
    dataset = RandomIterableDataset(max_length=2)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
    self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
def test_skip_batch_sampler(self):
    batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
    new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
    self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

def test_skip_data_loader(self):
    dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
    self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

def test_skip_first_batches(self):
    dataloader = DataLoader(list(range(16)), batch_size=4)
    new_dataloader = skip_first_batches(dataloader, num_batches=2)
    self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

def test_end_of_dataloader(self):
    dataloader = DataLoaderShard(list(range(16)), batch_size=4)
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    # Test it also works on the second iteration
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)

def test_end_of_dataloader_dispatcher(self):
    Accelerator()
    dataloader = DataLoaderDispatcher(range(16), batch_size=4)
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
    # Test it also works on the second iteration
    for idx, _ in enumerate(dataloader):
        self.assertEqual(dataloader.end_of_dataloader, idx == 3)
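
# A minimal usage sketch for the utilities exercised above, assuming two
# processes sharding the same sampler (arguments mirror the accelerate
# data_loader API imported at the top of this file).
base_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
shards = [BatchSamplerShard(base_sampler, 2, process_index) for process_index in range(2)]
# Each "process" sees every other batch:
# process 0 -> [[0, 1, 2], [6, 7, 8], ...], process 1 -> [[3, 4, 5], [9, 10, 11], ...]
for process_index, shard in enumerate(shards):
    print(f"process {process_index}: {list(shard)}")

# skip_first_batches is the resume-from-checkpoint helper: the returned
# dataloader starts iteration at batch `num_batches` of the original one.
resumable = skip_first_batches(DataLoader(list(range(16)), batch_size=4), num_batches=2)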
| 330 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
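
# Illustrative usage: the subcommand registered above surfaces on the command
# line as `diffusers-cli env`; the programmatic equivalent is simply:
if __name__ == "__main__":
    EnvironmentCommand().run()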
| 304 |
# Hand-tuned denoising timestep schedules of 27, 27, 50, 100, 185, 27, 40 and
# 100 steps respectively; each runs from the noisiest timestep 999 down to 0.
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288,
    266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572,
    429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859,
    840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499,
    400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120,
    100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0,
]
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957,
    956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903,
    897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834,
    828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756,
    749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656,
    655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527,
    526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307,
    264, 263, 220, 219, 176, 132, 88, 44, 0,
]
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972,
    970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944,
    941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913,
    910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877,
    874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840,
    837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801,
    798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760,
    756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714,
    710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664,
    660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612,
    607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545,
    539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466,
    457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351,
    330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900,
    899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914,
    907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699,
    650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958,
    955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914,
    910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838,
    830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722,
    711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599,
    585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428,
    414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220,
    200, 199, 166, 133, 100, 99, 66, 33, 0,
]
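
# Quick structural sanity check for the schedules above (illustrative): each
# one must be strictly decreasing and terminate at timestep 0 so a sampler can
# walk it from the noisiest step down to the last denoising step.
for name, schedule in [
    ("fast27", fast27_timesteps),
    ("smart27", smart27_timesteps),
    ("smart50", smart50_timesteps),
    ("smart100", smart100_timesteps),
    ("smart185", smart185_timesteps),
    ("super27", super27_timesteps),
    ("super40", super40_timesteps),
    ("super100", super100_timesteps),
]:
    assert all(a > b for a, b in zip(schedule, schedule[1:])), name
    assert schedule[-1] == 0, name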
| 304 | 1 |
'''simple docstring'''
from math import factorial


def solution(n: int = 20) -> int:
    """Count lattice routes through an n x n grid: the central binomial
    coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
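
# Cross-check (illustrative): solution(n) is the central binomial coefficient
# C(2n, n), so it must agree with math.comb for every n.
from math import comb

assert all(solution(n) == comb(2 * n, n) for n in range(1, 25))
print(solution(20))  # 137846528820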
| 349 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
__SCREAMING_SNAKE_CASE = PegasusConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = '''gelu'''
def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=5 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> Optional[Any]:
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = eos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = bos_token_id
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Dict:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , decoder_attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase , lowercase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase , decoder_attention_mask=lowercase , past_key_values=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase , decoder_position_ids=lowercase , )
__UpperCamelCase = model.decode(lowercase , lowercase , decoder_attention_mask=lowercase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = FlaxPegasusModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase , lowercase , lowercase )
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(lowercase , lowercase )
__UpperCamelCase = model_class(lowercase )
@jax.jit
def encode_jitted(lowercase , lowercase=None , **lowercase ):
return model.encode(input_ids=lowercase , attention_mask=lowercase )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def __lowerCamelCase ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(lowercase )
__UpperCamelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
__UpperCamelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase , lowercase , lowercase ):
return model.decode(
decoder_input_ids=lowercase , decoder_attention_mask=lowercase , encoder_outputs=lowercase , )
with self.subTest("""JIT Enabled""" ):
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) )
for jitted_output, output in zip(lowercase , lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCamelCase ( self ) -> Dict:
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=lowercase )
__UpperCamelCase = np.ones((1, 1) )
__UpperCamelCase = model(lowercase )
self.assertIsNotNone(lowercase )
@slow
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
__UpperCamelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
__UpperCamelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
__UpperCamelCase = tokenizer(lowercase , return_tensors="""np""" , truncation=lowercase , max_length=5_1_2 , padding=lowercase )
__UpperCamelCase = model.generate(**lowercase , num_beams=2 ).sequences
__UpperCamelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
assert tgt_text == decoded
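
# Self-contained illustration of the mask construction used by
# prepare_pegasus_inputs_dict above: padding positions are zeroed out, and the
# decoder mask always keeps position 0 because decoder inputs are shifted right.
import numpy as np

pad_token_id = 0
decoder_input_ids = np.array([[2, 11, 12, pad_token_id, pad_token_id]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask)  # [[1 1 1 0 0]]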
| 349 | 1 |
"""simple docstring"""
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BertConfig extended with the movement-pruning hyper-parameters
    (pruning_method, mask_init, mask_scale)."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
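
# Illustrative usage of the config above: the three pruning-specific knobs are
# pruning_method, mask_init and mask_scale; everything else mirrors BertConfig.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
assert config.model_type == "masked_bert"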
| 371 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
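
# The module above only builds an import table; _LazyModule resolves names on
# first attribute access. A minimal sketch of that pattern using PEP 562's
# module-level __getattr__ (illustrative, not the actual transformers
# implementation):
import importlib

_lazy_imports = {"TableTransformerConfig": ".configuration_table_transformer"}


def __getattr__(name):
    if name in _lazy_imports:
        module = importlib.import_module(_lazy_imports[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")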
| 80 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a : Union[str, Any] = KandinskyVaaImgaImgPipeline
_a : Optional[Any] = ['image_embeds', 'negative_image_embeds', 'image']
_a : List[Any] = [
'image_embeds',
'negative_image_embeds',
'image',
]
_a : Optional[Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_a : Any = False
@property
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim
@property
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return 100
@property
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCAmelCase = UNetaDConditionModel(**_a )
return model
@property
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.dummy_unet
_UpperCAmelCase = self.dummy_movq
_UpperCAmelCase = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_UpperCAmelCase = DDIMScheduler(**_a )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> str:
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
_UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((256, 256) )
if str(_a ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(_a )
else:
_UpperCAmelCase = torch.Generator(device=_a ).manual_seed(_a )
_UpperCAmelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**_a )
_UpperCAmelCase = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase = pipe(**self.get_dummy_inputs(_a ) )
_UpperCAmelCase = output.images
_UpperCAmelCase = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_img2img_frog.npy' )
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_UpperCAmelCase = 'A red cartoon frog, 4k'
_UpperCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_UpperCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase , _UpperCAmelCase = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_UpperCAmelCase = pipeline(
image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
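
# Compact sketch of the two-stage flow the slow test above exercises: the
# prior turns text into CLIP image embeddings, the decoder turns embeddings
# (plus a start image) into pixels. Model ids follow the test; `init_image`
# is a placeholder assumed to be a PIL image supplied by the caller.
prior = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
decoder = KandinskyVaaImgaImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
image_embeds, negative_image_embeds = prior("A red cartoon frog, 4k").to_tuple()
frog = decoder(
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,
).images[0]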
| 329 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters: compute the MinHash of every snippet, then feed
    the hashes to a MinHashLSH index."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative per group of near-identical files in a cluster
    and count how many copies it stands for."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset and return it together with the duplicate
    clusters, annotated with is_extreme / copies."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
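
# Self-contained demo of the MinHash machinery above: two near-duplicate token
# sets should report an estimated Jaccard similarity close to the exact value
# (the estimate sharpens as num_perm grows).
from datasketch import MinHash

tokens_a = {"def", "foo", "return", "a", "+", "b"}
tokens_b = {"def", "bar", "return", "a", "+", "b"}
hashes = []
for tokens in (tokens_a, tokens_b):
    m = MinHash(num_perm=256)
    for token in tokens:
        m.update(token.encode())
    hashes.append(m)
exact = len(tokens_a & tokens_b) / len(tokens_a | tokens_b)  # 5/7 ~ 0.714
print(f"estimated={hashes[0].jaccard(hashes[1]):.3f} exact={exact:.3f}")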
| 45 | 0 |
import warnings
from .generation import TFGenerationMixin
# Re-export under the old module location; subclassing the imported mixin keeps
# the public name while emitting a deprecation warning at import time.
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 120 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
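
# Property check (illustrative): both variants must agree with math.gcd, and
# the result must satisfy the gcd/lcm identity gcd(a, b) * lcm(a, b) == a * b.
import math
import random

for _ in range(100):
    a, b = random.randint(1, 10_000), random.randint(1, 10_000)
    g = euclidean_gcd(a, b)
    assert g == euclidean_gcd_recursive(a, b) == math.gcd(a, b)
    assert math.lcm(a, b) == (a * b) // g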
| 120 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
__snake_case = ViTImageProcessor if is_vision_available() else None
@property
def __lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def setUp(self):
    self.image_size = (3, 32, 128)
    self.tmpdirname = tempfile.mkdtemp()
    # fmt: off
    vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    # fmt: on
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
    with open(self.vocab_file, 'w', encoding='utf-8') as fp:
        fp.write(json.dumps(vocab_tokens) + '\n')
    image_processor_map = {
        'do_normalize': False,
        'do_resize': True,
        'image_processor_type': 'ViTImageProcessor',
        'resample': 3,
        'size': {'height': 32, 'width': 128},
    }
    self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
    with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
        json.dump(image_processor_map, fp)
def __lowerCAmelCase ( self : Union[str, Any] , **__UpperCAmelCase : List[Any] ) ->List[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
a = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) )
return image_input
def __lowerCAmelCase ( self : Optional[Any] ) ->str:
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_image_processor()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
a = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_image_processor()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''test'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''test'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
a = processor.char_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
a = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = None
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = torch.randn(1 , 27 , 38 )
a = torch.randn(1 , 27 , 50_257 )
a = torch.randn(1 , 27 , 30_522 )
a = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
| 0 |
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 0 | 1 |
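The exact float comparison above is brittle: `27 ** (1 / 3)` evaluates to roughly 3.0000000000000004, so `perfect_cube(27)` actually prints False. A sketch of a float-free variant (the helper name `perfect_cube_rounded` is ours): round the real cube root, then verify with exact integer arithmetic, guarding against the root being rounded one off.

def perfect_cube_rounded(n: int) -> bool:
    """Round the floating cube root, then verify exactly with integers."""
    if n < 0:
        n = -n
    root = round(n ** (1 / 3))
    return any((root + d) ** 3 == n for d in (-1, 0, 1))

print(perfect_cube_rounded(27))           # True
print(perfect_cube_rounded(4))            # False
print(perfect_cube_rounded(344_472_101))  # 701 ** 3 -> True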
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 1_28],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """simple docstring"""
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason='FocalNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason='FocalNet does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """simple docstring"""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 2_81)


@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = FocalNetModelTester(self)
| 149 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : int = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : int = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : str = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["""sentencepiece"""]
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ['sentencepiece'] )
| 149 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = """microsoft/speecht5_tts"""
    description = (
        """This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
        """text to read (in English) and returns a waveform object containing the sound."""
    )
    name = """text_reader"""
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["""text"""]
    outputs = ["""audio"""]

    def setup(self):
        """simple docstring"""
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """simple docstring"""
        inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')

            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
            speaker_embeddings = torch.tensor(embeddings_dataset[7_3_0_5]['xvector']).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        """simple docstring"""
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 286 |
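A minimal usage sketch for the tool above, assuming the transformers tools API where `PipelineTool.__call__` chains encode -> forward -> decode (the call downloads the checkpoints on first use):

tool = TextToSpeechTool()
tool.setup()  # resolves the processor, model, and (if unset) the HiFi-GAN vocoder
speech = tool("Hello, how are you today?")  # returns a waveform tensor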
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 6_4,
        encoder_ffn_dim: int = 3_2,
        decoder_ffn_dim: int = 3_2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 1_0_0,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        """simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """simple docstring"""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 286 | 1 |
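A worked instance of the feature-size arithmetic in the config above (values chosen for illustration): with the default lags `[1..7]`, one lagged copy per input dimension gives `1 * 7 = 7` features, and `_number_of_features` adds `2 (embedding) + 0 (dynamic real) + 2 (time) + 0 (static real) + 2 (loc/scale)`.

config = InformerConfig(
    prediction_length=24,
    input_size=1,
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[5],
    embedding_dimension=[2],
)
print(config.feature_size)  # 7 + 6 = 13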
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """simple docstring"""
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """simple docstring"""
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    """simple docstring"""
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(F'''F1: {f1:.2f}''')
    logger.info(F'''EM: {em:.2f}''')


def get_precision_at_k(args, preds_path, gold_data_path):
    """simple docstring"""
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''')


def evaluate_batch_retrieval(args, rag_model, questions):
    """simple docstring"""

    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    """simple docstring"""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type"
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples"
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples"
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true"
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating."
    )
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retried while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    """simple docstring"""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
| 319 |
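A toy illustration of `metric_max_over_ground_truths` from the cell above: the metric below is a stand-in we define here (the real script uses `exact_match_score` and `f1_score` from `utils_rag`), and the max over references rewards matching any acceptable answer.

def toy_exact_match(prediction: str, ground_truth: str) -> float:
    return float(prediction.strip().lower() == ground_truth.strip().lower())

print(metric_max_over_ground_truths(toy_exact_match, "Paris", ["paris", "Paris, France"]))  # 1.0
print(metric_max_over_ground_truths(toy_exact_match, "Lyon", ["paris", "Paris, France"]))   # 0.0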
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 319 | 1 |
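Since the `deprecate(...)` call above runs at module import time, importing through the legacy path should emit the warning once. A sketch of observing it (assuming the module has not been imported yet in the process):

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from diffusers.pipeline_utils import DiffusionPipeline  # noqa: F401  (deprecated path)

print(caught[0].message if caught else "module was already imported, no warning re-emitted")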
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """simple docstring"""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({""".""".join(layer.split(""".""")[:3]) for layer in vae_state_dict if """encoder.down""" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({""".""".join(layer.split(""".""")[:3]) for layer in vae_state_dict if """decoder.up""" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]

        if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.weight')
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.bias')

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"""old""": f'down.{i}.block', """new""": f'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if """encoder.mid.block""" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"""old""": f'mid.block_{i}', """new""": f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
        ]

        if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.bias'
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"""old""": f'up.{block_id}.block', """new""": f'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if """decoder.mid.block""" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"""old""": f'mid.block_{i}', """new""": f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str) -> None:
    """simple docstring"""
    # Only support V1
    r = requests.get(
        """https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""")
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if checkpoint_path.endswith("""safetensors"""):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="""pt""", device="""cpu""") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["""state_dict"""]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path where the converted VAE will be written.')

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 154 |
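Besides the CLI entry point, the converter can be called programmatically; a sketch with placeholder paths (the function fetches the v1 inference config over the network and writes a diffusers-format VAE directory):

vae_pt_to_vae_diffuser("/path/to/vae.ckpt", "/path/to/converted_vae")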
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """simple docstring"""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'Unsupported activation function: {act_fn}')
| 147 | 0 |
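A short usage sketch for the activation factory above, dropping the returned module into a tiny block:

import torch
from torch import nn

act = get_activation("gelu")
block = nn.Sequential(nn.Linear(8, 8), act)
print(block(torch.randn(2, 8)).shape)  # torch.Size([2, 8])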
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=10_00,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = TFLayoutLMvaModel(config=lowerCAmelCase )
# text + image
snake_case = model(lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase )
snake_case = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , training=lowerCAmelCase , )
snake_case = model(lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
snake_case = model(lowerCAmelCase , training=lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
snake_case = model({'pixel_values': pixel_values} , training=lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = TFLayoutLMvaForSequenceClassification(config=lowerCAmelCase )
snake_case = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = TFLayoutLMvaForTokenClassification(config=lowerCAmelCase )
snake_case = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
snake_case = 2
snake_case = TFLayoutLMvaForQuestionAnswering(config=lowerCAmelCase )
snake_case = model(
lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , training=lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.prepare_config_and_inputs()
((snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case)) = config_and_inputs
snake_case = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Any = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : Optional[int] = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_lowerCAmelCase : Any = False
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Optional[int] = False
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
"""simple docstring"""
return True
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = copy.deepcopy(lowerCAmelCase )
if model_class in get_values(lowerCAmelCase ):
snake_case = {
k: tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCAmelCase , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCAmelCase ):
snake_case = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase ):
snake_case = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
snake_case = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase ):
snake_case = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCAmelCase ):
snake_case = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def snake_case ( self ):
"""simple docstring"""
snake_case = TFLayoutLMvaModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(lowerCAmelCase )
if getattr(lowerCAmelCase , 'hf_compute_loss' , lowerCAmelCase ):
# The number of elements in the loss should be the same as the number of elements in the label
snake_case = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
snake_case = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCAmelCase )[0]
]
snake_case = added_label.shape.as_list()[:1]
                # Test that the model correctly computes the loss with kwargs
snake_case = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
snake_case = prepared_for_class.pop('input_ids' )
snake_case = model(lowerCAmelCase , **lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that the model correctly computes the loss when we mask some positions
snake_case = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
snake_case = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
snake_case = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
snake_case = -1_00
snake_case = tf.convert_to_tensor(lowerCAmelCase )
snake_case = model(lowerCAmelCase , **lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that the model correctly computes the loss with a dict
snake_case = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
snake_case = model(lowerCAmelCase )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that the model correctly computes the loss with a tuple
snake_case = self._prepare_for_class(inputs_dict.copy() , lowerCAmelCase , return_labels=lowerCAmelCase )
# Get keys that were added with the _prepare_for_class function
snake_case = prepared_for_class.keys() - inputs_dict.keys()
snake_case = inspect.signature(model.call ).parameters
snake_case = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
snake_case = {0: 'input_ids'}
for label_key in label_keys:
snake_case = signature_names.index(lowerCAmelCase )
snake_case = label_key
snake_case = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
snake_case = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
snake_case = prepared_for_class[value]
snake_case = tuple(lowerCAmelCase )
# Send to model
snake_case = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def snake_case ( self ):
"""simple docstring"""
        ((snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
        ((snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case)) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
        ((snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
        ((snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
        ((snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case) ,(snake_case)) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = TFLayoutLMvaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowerCAmelCase__ ( ) -> str:
"""simple docstring"""
snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase ) if is_vision_available() else None
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=lowerCAmelCase , return_tensors='tf' ).pixel_values
snake_case = tf.constant([[1, 2]] )
snake_case = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
snake_case = model(input_ids=lowerCAmelCase , bbox=lowerCAmelCase , pixel_values=lowerCAmelCase , training=lowerCAmelCase )
# verify the logits
snake_case = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase )
snake_case = tf.constant(
[[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
| 149 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = random.Random()
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any]=1.0 , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
if rng is None:
snake_case = global_rng
snake_case = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=4_00 , lowerCAmelCase=20_00 , lowerCAmelCase=20_48 , lowerCAmelCase=1_28 , lowerCAmelCase=1 , lowerCAmelCase=5_12 , lowerCAmelCase=30 , lowerCAmelCase=4_41_00 , ):
"""simple docstring"""
snake_case = parent
snake_case = batch_size
snake_case = min_seq_length
snake_case = max_seq_length
snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
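        # Step size that spaces sequence lengths evenly between min_seq_length and max_seq_length across the batch.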
snake_case = spectrogram_length
snake_case = feature_size
snake_case = num_audio_channels
snake_case = hop_length
snake_case = chunk_length
snake_case = sampling_rate
def snake_case ( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def snake_case ( self , lowerCAmelCase=False , lowerCAmelCase=False ):
"""simple docstring"""
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case = [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase_ ( lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = TvltFeatureExtractor
def snake_case ( self ):
"""simple docstring"""
snake_case = TvltFeatureExtractionTester(self )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCAmelCase , 'spectrogram_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'num_audio_channels' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'hop_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'chunk_length' ) )
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate' ) )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = feat_extract_first.save_pretrained(lowerCAmelCase )[0]
check_json_file_has_correct_format(lowerCAmelCase )
snake_case = self.feature_extraction_class.from_pretrained(lowerCAmelCase )
snake_case = feat_extract_first.to_dict()
snake_case = feat_extract_second.to_dict()
snake_case = dict_first.pop('mel_filters' )
snake_case = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = os.path.join(lowerCAmelCase , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCAmelCase )
snake_case = self.feature_extraction_class.from_json_file(lowerCAmelCase )
snake_case = feat_extract_first.to_dict()
snake_case = feat_extract_second.to_dict()
snake_case = dict_first.pop('mel_filters' )
snake_case = dict_second.pop('mel_filters' )
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
snake_case = feature_extractor(lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
snake_case = feature_extractor(
lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 , mask_audio=lowerCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
snake_case = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case = np.asarray(lowerCAmelCase )
snake_case = feature_extractor(lowerCAmelCase , return_tensors='np' , sampling_rate=4_41_00 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def snake_case ( self , lowerCAmelCase ):
"""simple docstring"""
snake_case = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
snake_case = ds.sort('id' ).select(range(lowerCAmelCase ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def snake_case ( self ):
"""simple docstring"""
snake_case = self._load_datasamples(1 )
snake_case = TvltFeatureExtractor()
snake_case = feature_extractor(lowerCAmelCase , return_tensors='pt' ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_92, 1_28) )
snake_case = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCAmelCase , atol=1E-4 ) )
| 149 | 1 |
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = VQModel
lowerCamelCase__ = """sample"""
@property
def A_ ( self , lowercase=(32, 32) ):
_lowerCamelCase : str = 4
_lowerCamelCase : Tuple = 3
_lowerCamelCase : Any = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase )
return {"sample": image}
@property
def A_ ( self ):
return (3, 32, 32)
@property
def A_ ( self ):
return (3, 32, 32)
def A_ ( self ):
_lowerCamelCase : List[str] = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
_lowerCamelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : List[str] = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=lowercase )
self.assertIsNotNone(lowercase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowercase )
_lowerCamelCase : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = VQModel.from_pretrained('fusing/vqgan-dummy' )
model.to(lowercase ).eval()
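        # Seed the CPU (and, when available, CUDA) RNGs so the random input and the regression slice below are reproducible.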
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
_lowerCamelCase : Any = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
_lowerCamelCase : List[str] = image.to(lowercase )
with torch.no_grad():
_lowerCamelCase : int = model(lowercase ).sample
_lowerCamelCase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowerCamelCase : Tuple = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
        self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-3 ) )
| 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 96 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
lowerCamelCase_ : List[Any] = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def _A ( lowercase , lowercase , lowercase , lowercase=None ):
"""simple docstring"""
a =XLNetConfig.from_json_file(lowercase )
a =finetuning_task.lower() if finetuning_task is not None else ''''''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
a =finetuning_task
a =GLUE_TASKS_NUM_LABELS[finetuning_task]
a =XLNetForSequenceClassification(lowercase )
elif "squad" in finetuning_task:
a =finetuning_task
a =XLNetForQuestionAnswering(lowercase )
else:
a =XLNetLMHeadModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(lowercase , lowercase , lowercase )
# Save pytorch-model
a =os.path.join(lowercase , lowercase )
a =os.path.join(lowercase , lowercase )
print(f'''Save PyTorch model to {os.path.abspath(lowercase )}''' )
torch.save(model.state_dict() , lowercase )
print(f'''Save configuration file to {os.path.abspath(lowercase )}''' )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
lowerCamelCase_ : Dict = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 363 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , **__A ) -> Dict:
super().__init__(**__A )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , '''vision''' )
self.check_model_type(__A )
def __call__( self , __A , __A = None , **__A , ) -> List[str]:
if "text_queries" in kwargs:
a =kwargs.pop('''text_queries''' )
if isinstance(__A , (str, Image.Image) ):
a ={'''image''': image, '''candidate_labels''': candidate_labels}
else:
a =image
a =super().__call__(__A , **__A )
return results
def SCREAMING_SNAKE_CASE ( self , **__A ) -> Optional[Any]:
a ={}
if "threshold" in kwargs:
a =kwargs['''threshold''']
if "top_k" in kwargs:
a =kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self , __A ) -> str:
a =load_image(inputs['''image'''] )
a =inputs['''candidate_labels''']
if isinstance(__A , __A ):
a =candidate_labels.split(''',''' )
a =torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
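        # target_size records the original (height, width) so detected boxes can be rescaled back in postprocessing.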
for i, candidate_label in enumerate(__A ):
a =self.tokenizer(__A , return_tensors=self.framework )
a =self.image_processor(__A , return_tensors=self.framework )
yield {
"is_last": i == len(__A ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[Any]:
a =model_inputs.pop('''target_size''' )
a =model_inputs.pop('''candidate_label''' )
a =model_inputs.pop('''is_last''' )
a =self.model(**__A )
a ={'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self , __A , __A=0.1 , __A=None ) -> List[str]:
a =[]
for model_output in model_outputs:
a =model_output['''candidate_label''']
a =BaseModelOutput(__A )
a =self.image_processor.post_process_object_detection(
outputs=__A , threshold=__A , target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
a =outputs['''scores'''][index].item()
a =self._get_bounding_box(outputs['''boxes'''][index][0] )
a ={'''score''': score, '''label''': label, '''box''': box}
results.append(__A )
        a =sorted(__A , key=lambda x : x["score"] , reverse=__A )
if top_k:
a =results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self , __A ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
a , a , a , a =box.int().tolist()
a ={
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
        return bbox
| 215 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[int, Iterable[int]] , SCREAMING_SNAKE_CASE : bool , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def constraint_to_multiple_of(SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any]=0 , SCREAMING_SNAKE_CASE : Optional[int]=None ):
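        # Round val to the nearest multiple of "multiple", then floor or ceil instead
        # when the rounded value falls outside [min_val, max_val].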
lowerCAmelCase : List[Any] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCAmelCase : int = math.floor(val / multiple ) * multiple
if x < min_val:
lowerCAmelCase : Optional[int] = math.ceil(val / multiple ) * multiple
return x
lowerCAmelCase : List[Any] = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else output_size
lowerCAmelCase , lowerCAmelCase : Tuple = get_image_size(SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = output_size
# determine new height and width
lowerCAmelCase : Dict = output_height / input_height
lowerCAmelCase : Tuple = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCAmelCase : Dict = scale_width
else:
# fit height
lowerCAmelCase : Dict = scale_height
lowerCAmelCase : List[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Any =["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = False , snake_case__ = 1 , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : int = size if size is not None else {"height": 384, "width": 384}
lowerCAmelCase : Tuple = get_size_dict(snake_case__ )
lowerCAmelCase : int = do_resize
lowerCAmelCase : Tuple = size
lowerCAmelCase : Dict = keep_aspect_ratio
lowerCAmelCase : List[Any] = ensure_multiple_of
lowerCAmelCase : Optional[int] = resample
lowerCAmelCase : Tuple = do_rescale
lowerCAmelCase : int = rescale_factor
lowerCAmelCase : List[Any] = do_normalize
lowerCAmelCase : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = False , snake_case__ = 1 , snake_case__ = PILImageResampling.BICUBIC , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : int = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowerCAmelCase : int = get_resize_output_image_size(
snake_case__ , output_size=(size["height"], size["width"]) , keep_aspect_ratio=snake_case__ , multiple=snake_case__ , )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Dict = size if size is not None else self.size
lowerCAmelCase : Any = get_size_dict(snake_case__ )
lowerCAmelCase : Any = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase : List[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase : Tuple = resample if resample is not None else self.resample
lowerCAmelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Tuple = image_std if image_std is not None else self.image_std
lowerCAmelCase : str = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowerCAmelCase : Optional[Any] = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
lowerCAmelCase : Dict = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
lowerCAmelCase : Optional[int] = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
lowerCAmelCase : Optional[int] = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
lowerCAmelCase : Optional[Any] = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
lowerCAmelCase : Any = {"pixel_values": images}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(snake_case__ ):
lowerCAmelCase : Optional[Any] = target_sizes.numpy()
lowerCAmelCase : Dict = []
for idx in range(len(snake_case__ ) ):
lowerCAmelCase : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=snake_case__ )
lowerCAmelCase : List[str] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case__ )
else:
lowerCAmelCase : int = logits.argmax(dim=1 )
lowerCAmelCase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 108 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ['''image_processor''']
__lowerCAmelCase = '''SamImageProcessor'''
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__a : Any = self.image_processor
__a : List[Any] = -10
__a : str = self.image_processor.size['''longest_edge''']
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : Tuple = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
        # pop arguments that are not used in the forward but used nevertheless
__a : Optional[Any] = encoding_image_processor['''original_sizes''']
if hasattr(_UpperCAmelCase , '''numpy''' ): # Checks if Torch or TF tensor
__a : Optional[Any] = original_sizes.numpy()
__a , __a , __a : int = self._check_and_preprocess_points(
input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , )
__a : List[Any] = self._normalize_and_convert(
_UpperCAmelCase , _UpperCAmelCase , input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , )
return encoding_image_processor
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="pt" , ):
if input_points is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
__a : Dict = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] ) for point in input_points
]
else:
__a : Dict = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase )
for point, original_size in zip(_UpperCAmelCase , _UpperCAmelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__a , __a : Tuple = self._pad_points_and_labels(_UpperCAmelCase , _UpperCAmelCase )
__a : List[Any] = np.array(_UpperCAmelCase )
if input_labels is not None:
__a : List[Any] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
__a : Any = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] , is_bounding_box=_UpperCAmelCase )
for box in input_boxes
]
else:
__a : int = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase , is_bounding_box=_UpperCAmelCase )
for box, original_size in zip(_UpperCAmelCase , _UpperCAmelCase )
]
__a : Optional[int] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if return_tensors == "pt":
__a : Any = torch.from_numpy(_UpperCAmelCase )
# boxes batch size of 1 by default
__a : str = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__a : Dict = tf.convert_to_tensor(_UpperCAmelCase )
# boxes batch size of 1 by default
__a : str = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__a : int = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
__a : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__a : List[Any] = tf.convert_to_tensor(_UpperCAmelCase )
# point batch size of 1 by default
__a : Optional[Any] = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__a : Any = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
__a : Union[str, Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__a : str = tf.convert_to_tensor(_UpperCAmelCase )
# point batch size of 1 by default
__a : Dict = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = max([point.shape[0] for point in input_points] )
__a : Dict = []
for i, point in enumerate(_UpperCAmelCase ):
if point.shape[0] != expected_nb_points:
__a : Any = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__a : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_UpperCAmelCase )
__a : int = processed_input_points
return input_points, input_labels
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
__a , __a : str = original_size
__a , __a : Optional[int] = self.image_processor._get_preprocess_shape(_UpperCAmelCase , longest_edge=_UpperCAmelCase )
__a : List[str] = deepcopy(_UpperCAmelCase ).astype(_UpperCAmelCase )
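        # Rescale coordinates from the original image size to the resized (longest-edge) size;
        # bounding boxes are handled as two corner points.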
if is_bounding_box:
__a : Optional[int] = coords.reshape(-1 , 2 , 2 )
__a : str = coords[..., 0] * (new_w / old_w)
__a : List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__a : List[Any] = coords.reshape(-1 , 4 )
return coords
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if input_points is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ): # Checks for TF or Torch tensor
__a : str = input_points.numpy().tolist()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_points[0] , _UpperCAmelCase ):
raise ValueError('''Input points must be a list of list of floating points.''' )
__a : str = [np.array(_UpperCAmelCase ) for input_point in input_points]
else:
__a : Optional[int] = None
if input_labels is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ):
__a : Dict = input_labels.numpy().tolist()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_labels[0] , _UpperCAmelCase ):
                raise ValueError('''Input labels must be a list of list of integers.''' )
__a : Dict = [np.array(_UpperCAmelCase ) for label in input_labels]
else:
__a : Tuple = None
if input_boxes is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ):
__a : List[Any] = input_boxes.numpy().tolist()
if (
not isinstance(_UpperCAmelCase , _UpperCAmelCase )
or not isinstance(input_boxes[0] , _UpperCAmelCase )
or not isinstance(input_boxes[0][0] , _UpperCAmelCase )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
__a : Optional[Any] = [np.array(_UpperCAmelCase ).astype(np.floataa ) for box in input_boxes]
else:
__a : Union[str, Any] = None
return input_points, input_labels, input_boxes
@property
def _lowerCamelCase ( self ):
__a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(_UpperCAmelCase ) )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        return self.image_processor.post_process_masks(*_UpperCAmelCase , **_UpperCAmelCase )
| 160 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
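    # Pad or truncate every example to sequence_length with padding_value, respecting padding_side;
    # a tuple padding_value marks 2-D per-position entries such as spans.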
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: List[str] = np.full((len(__UpperCAmelCase ), sequence_length, 2) , __UpperCAmelCase )
else:
lowercase__: Optional[int] = np.full((len(__UpperCAmelCase ), sequence_length) , __UpperCAmelCase )
for i, tensor in enumerate(__UpperCAmelCase ):
if padding_side == "right":
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Any = tensor[:sequence_length]
else:
lowercase__: List[str] = tensor[:sequence_length]
else:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Any = tensor[:sequence_length]
else:
lowercase__: Optional[int] = tensor[:sequence_length]
return out_tensor.tolist()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
lowercase__: Union[str, Any] = ord(__UpperCAmelCase )
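    # The four ranges cover the ASCII punctuation blocks: 33-47 (!-/), 58-64 (:-@), 91-96 ([-`) and 123-126 ({-~).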
if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
return True
lowercase__: Union[str, Any] = unicodedata.category(__UpperCAmelCase )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :PreTrainedTokenizerBase
_UpperCAmelCase :Union[bool, str, PaddingStrategy] = True
_UpperCAmelCase :Optional[int] = None
_UpperCAmelCase :Optional[int] = None
_UpperCAmelCase :int = -100
_UpperCAmelCase :str = "pt"
def _snake_case ( self , _UpperCAmelCase ):
import torch
lowercase__: str = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowercase__: Union[str, Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__: Tuple = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
lowercase__: Optional[Any] = torch.tensor(batch['''entity_ids'''] ).shape[1]
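        # Pad label sequences out to the entity sequence length with label_pad_token_id, honoring the tokenizer's padding side.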
lowercase__: Optional[Any] = self.tokenizer.padding_side
if padding_side == "right":
lowercase__: str = [
list(_UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) for label in labels
]
else:
lowercase__: int = [
[self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) + list(_UpperCAmelCase ) for label in labels
]
lowercase__: Tuple = [feature['''ner_tags'''] for feature in features]
lowercase__: Dict = padding_tensor(_UpperCAmelCase , -1 , _UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = [feature['''original_entity_spans'''] for feature in features]
lowercase__: Optional[Any] = padding_tensor(_UpperCAmelCase , (-1, -1) , _UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = {k: torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 351 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = "codegen"
_UpperCAmelCase :Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: int = vocab_size
lowercase__: str = n_ctx
lowercase__: List[Any] = n_positions
lowercase__: Union[str, Any] = n_embd
lowercase__: Optional[Any] = n_layer
lowercase__: str = n_head
lowercase__: List[Any] = n_inner
lowercase__: Union[str, Any] = rotary_dim
lowercase__: Optional[Any] = activation_function
lowercase__: Union[str, Any] = resid_pdrop
lowercase__: Optional[int] = embd_pdrop
lowercase__: Optional[Any] = attn_pdrop
lowercase__: Optional[int] = layer_norm_epsilon
lowercase__: List[Any] = initializer_range
lowercase__: Tuple = use_cache
lowercase__: Any = bos_token_id
lowercase__: Any = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' )
lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
        # We need to order the inputs in the way they appear in the forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Any = seqlen + 2
lowercase__: List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__: Optional[Any] = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Optional[Any] = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
| 2 | 0 |