code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a__ ( ):
'''simple docstring'''
__magic_name__ , __magic_name__ = 9, 14 # noqa: F841
__magic_name__ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__magic_name__ = defaultdict(A_ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__magic_name__ = mst(A_ )
__magic_name__ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__magic_name__ = tuple(answer[:2] )
__magic_name__ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 88 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = 42
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = torch.nn.Convad(
UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
# down
__magic_name__ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_down_block(
UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
self.down_blocks.append(UpperCamelCase__ )
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# out
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = 2 * out_channels if double_z else out_channels
__magic_name__ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
"""simple docstring"""
__magic_name__ = x
__magic_name__ = self.conv_in(UpperCamelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : int ):
def custom_forward(*UpperCamelCase__ : str ):
return module(*UpperCamelCase__ )
return custom_forward
# down
if is_torch_version(""">=""" , """1.11.0""" ):
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
for down_block in self.down_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
else:
# down
for down_block in self.down_blocks:
__magic_name__ = down_block(UpperCamelCase__ )
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ )
# post-process
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(UpperCamelCase__ )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(UpperCamelCase__ )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(UpperCamelCase__ )
if shape is not None:
__magic_name__ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = parameters
__magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
__magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
__magic_name__ = deterministic
__magic_name__ = torch.exp(0.5 * self.logvar )
__magic_name__ = torch.exp(self.logvar )
if self.deterministic:
__magic_name__ = __magic_name__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
__magic_name__ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__magic_name__ = self.mean + self.std * sample
return x
def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
__magic_name__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.mean
| 88 | 1 |
def a__ ( A_ ):
'''simple docstring'''
stooge(A_, 0, len(A_ ) - 1 )
return arr
def a__ ( A_, A_, A_ ):
'''simple docstring'''
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
__magic_name__ , __magic_name__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
__magic_name__ = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(A_, A_, (h - t) )
# Recursively sort last 2/3 elements
stooge(A_, i + t, (A_) )
# Recursively sort first 2/3 elements
stooge(A_, A_, (h - t) )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = input('Enter numbers separated by a comma:\n').strip()
__lowerCAmelCase : Any = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 88 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
__magic_name__ = window_size
__magic_name__ = mlp_ratio
__magic_name__ = qkv_bias
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = drop_path_rate
__magic_name__ = hidden_act
__magic_name__ = use_absolute_embeddings
__magic_name__ = patch_norm
__magic_name__ = layer_norm_eps
__magic_name__ = initializer_range
__magic_name__ = is_training
__magic_name__ = scope
__magic_name__ = use_labels
__magic_name__ = type_sequence_label_size
__magic_name__ = encoder_stride
__magic_name__ = out_features
__magic_name__ = out_indices
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ )
__magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
__magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCamelCase__ ):
__magic_name__ = ["""stem"""]
__magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
def _lowercase ( self : Any ) -> Any:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
__magic_name__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def _lowercase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__magic_name__ = outputs.hidden_states
__magic_name__ = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# Swin has a different seq_length
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ):
__magic_name__ = 0
return t
def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
with torch.no_grad():
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
if isinstance(UpperCamelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
) , )
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
__magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
'''simple docstring'''
a__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a__ = MaskFormerSwinConfig
def _lowercase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__magic_name__ = backbone_class(UpperCamelCase__ )
backbone.to(UpperCamelCase__ )
backbone.eval()
__magic_name__ = backbone(**UpperCamelCase__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
self.assertIsNotNone(outputs.attentions )
| 88 | 1 |
def a__ ( input_str, use_pascal = False ):
    """Convert a snake_case string to camelCase (or PascalCase).

    Fixes vs the obfuscated original: both parameters were named ``A_``
    (a SyntaxError) and locals (``words``, ``start_index``, ...) were read
    but never bound; the names are restored from those references.

    :param input_str: snake_case input, e.g. ``"some_random_string"``
    :param use_pascal: when True, capitalize the first word too (PascalCase)
    :raises ValueError: if ``input_str`` is not a str or ``use_pascal`` not a bool
    """
    if not isinstance(input_str, str ):
        msg = f'''Expected string as input, found {type(input_str )}'''
        raise ValueError(msg )
    if not isinstance(use_pascal, bool ):
        msg = f'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
        raise ValueError(msg )
    words = input_str.split("""_""" )
    # camelCase keeps the first word untouched; PascalCase capitalizes it too.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = """""" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 88 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : int ) -> None:
"""simple docstring"""
__magic_name__ = value
__magic_name__ = None
__magic_name__ = None
class UpperCAmelCase_ :
    """Sums every node value of a binary tree via depth-first search.

    Fixes vs the obfuscated original: ``__init__`` bound the tree to a local
    although ``__iter__`` reads ``self.tree``; the search method was defined
    as ``_lowercase`` but invoked as ``depth_first_search``; its body read a
    parameter ``node`` that the signature did not declare.
    """

    def __init__( self : Union[str, Any] , UpperCamelCase__ : Node ) -> None:
        self.tree = UpperCamelCase__

    def depth_first_search( self : Optional[Any] , node : Node | None ) -> int:
        """Return the sum of ``value`` over the subtree rooted at ``node``."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self : int ) -> Iterator[int]:
        """Yield the whole-tree total once, so ``list``/``sum`` work on it."""
        yield self.depth_first_search(self.tree )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 1 |
import unittest
import numpy as np
def a__ ( mat_a, mat_b, mat_c, pseudo_inv = None, ):
    """Return the Schur complement ``C - Bᵀ·A⁻¹·B`` of the block matrix ``[[A, B], [Bᵀ, C]]``.

    Fixes vs the obfuscated original: all four parameters were named ``A_``
    (a SyntaxError) and ``shape_a``/``shape_b``/``shape_c`` were read but
    never bound; the names are restored from those references, and the numpy
    error is chained into the raised ``ValueError``.

    :param mat_a: square matrix A (n x n)
    :param mat_b: matrix B (n x m)
    :param mat_c: square matrix C (m x m)
    :param pseudo_inv: optional precomputed (pseudo-)inverse of A
    :raises ValueError: on a shape mismatch, or when A is singular and no
        ``pseudo_inv`` was supplied
    """
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError as err:
            # Chain the numpy error so the root cause stays visible.
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" ) from err
    return mat_c - mat_b.T @ a_inv @ mat_b
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the Schur-complement function ``a__`` defined above.

    Fixes vs the obfuscated original: the tests invoked the undefined name
    ``schur_complement`` (this module defines the function as ``a__``); all
    three methods shared the name ``_lowercase`` so the first two were
    shadowed and never ran; locals (``a``/``b``/``c`` ...) were read but
    never bound.
    """

    def test_schur_complement( self : int ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = a__(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        # det [[A, B], [Bᵀ, C]] = det(A) * det(Schur complement)
        self.assertAlmostEqual(det_x , det_a * det_s )

    def test_improper_a_b_dimensions( self : List[Any] ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        # NOTE(review): arguments swapped so the row-count check fires —
        # the original call-site argument order was lost in obfuscation.
        with self.assertRaises(ValueError ):
            a__(b , a , c )

    def test_improper_b_c_dimensions( self : int ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            a__(a , b , c )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
| 88 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it exports.
# Fixes vs the obfuscated original: each optional branch *rebound* the same
# variable to a plain list instead of registering a key in this dict, and the
# final _LazyModule call referenced ``_import_structure`` although that name
# was never defined.  The result was also assigned to a throwaway variable
# instead of installed in ``sys.modules`` as the lazy-module pattern requires.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    # The lazy module must replace this module object in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 88 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase_ ( _A ):
    """Text-classification task template: maps dataset columns to ``text``/``labels``.

    Fixes vs the obfuscated original: ``frozen`` was set to the (merely
    truthy) imported base class instead of ``True``; all five class
    attributes shared the name ``a__`` although the methods read
    ``label_column``/``label_schema``/``text_column``; the alignment method
    bound every intermediate to one throwaway local while returning/reading
    the undefined names ``task_template`` and ``label_schema``; the method
    and the property shared the name ``_lowercase`` so the method was
    shadowed — the standard TaskTemplate API names are restored.
    """

    task: str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""" )} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features( self : Tuple , features : Dict ) -> "UpperCAmelCase_":
        """Return a copy of this template whose label schema uses the
        ``ClassLabel`` found in ``features``.

        :raises ValueError: if the label column is missing or not a ClassLabel
        """
        if self.label_column not in features:
            raise ValueError(F'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["""labels"""] = features[self.label_column]
        # Frozen dataclass: write through __dict__ to bypass the frozen guard.
        task_template.__dict__["""label_schema"""] = label_schema
        return task_template

    @property
    def column_mapping( self : Optional[int] ) -> Dict[str, str]:
        """Mapping from the dataset's column names to the template's names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 88 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Smoke tests for TensorFlowBenchmark on tiny checkpoints.

    NOTE(review): obfuscation damage — every method below binds values to the
    throwaway local ``__magic_name__`` and then reads names that are never
    defined (``MODEL_ID``, ``benchmark``, ``results``, ``config`` ...), and
    the boolean values originally passed for flags such as ``training``/
    ``inference``/``eager_mode``/``multi_process`` were replaced by the
    undefined ``UpperCamelCase__``.  The per-flag True/False values cannot be
    recovered from this file alone, so the code is left unchanged here;
    restore it from the upstream transformers benchmark test module.
    """

    def _lowercase ( self : List[str] , UpperCamelCase__ : int ) -> str:
        """Assert every (batch_size, sequence_length) cell of a result dict is set."""
        # NOTE(review): the body reads ``results`` — the parameter was
        # presumably named ``results`` before obfuscation; confirm upstream.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                __magic_name__ = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
        """Inference benchmark on tiny-gpt2 in eager mode (no configs)."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        # NOTE(review): ``MODEL_ID`` below is undefined — it was the name of
        # the local bound on the previous line before obfuscation.
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        """Inference benchmark on a tiny classifier, pretrain-only weights."""
        __magic_name__ = """sgugger/tiny-distilbert-classification"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Any ) -> List[Any]:
        """Inference benchmark on tiny-gpt2 in graph mode (no configs)."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Tuple ) -> List[Any]:
        """Inference benchmark with an explicit config, eager mode."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Any ) -> Union[str, Any]:
        """Inference benchmark with an explicit config, graph mode."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : List[Any] ) -> Dict:
        """Training benchmark on tiny-gpt2 (no configs)."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self : Optional[Any] ) -> List[str]:
        """Training benchmark with an explicit config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self : Union[str, Any] ) -> Any:
        """Inference benchmark for an encoder-decoder (tiny T5) with a config."""
        __magic_name__ = """patrickvonplaten/t5-tiny-random"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
    def _lowercase ( self : Tuple ) -> int:
        """Inference benchmark with XLA enabled (GPU only)."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Union[str, Any] ) -> Dict:
        """Benchmark writes its CSV result/env files into a temp directory."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            __magic_name__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , """env.csv""" ) , multi_process=UpperCamelCase__ , )
            __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """env.csv""" ) ).exists() )

    def _lowercase ( self : int ) -> Optional[Any]:
        """Line-by-line memory tracing produces a non-empty summary and a log file."""
        __magic_name__ = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(UpperCamelCase__ : Dict ):
            # A memory-trace summary exposes these four sections.
            self.assertTrue(hasattr(UpperCamelCase__ , """sequential""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """cumulative""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """current""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            __magic_name__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , """log.txt""" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
            __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
            __magic_name__ = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """log.txt""" ) ).exists() )
| 88 | 1 |
# Fixes vs the obfuscated original: all three functions were named ``a__``
# with duplicate parameter names (a SyntaxError), while the bodies and the
# main guard call ``greatest_common_divisor``/``lcm``/``solution`` — the
# names those call sites expect are restored.


def greatest_common_divisor ( x, y ):
    """Euclid's algorithm: greatest common divisor of ``x`` and ``y``."""
    return x if y == 0 else greatest_common_divisor(y, x % y )


def lcm ( x, y ):
    """Least common multiple, via the gcd identity ``x*y = gcd*lcm``."""
    return (x * y) // greatest_common_divisor(x, y )


def solution ( n = 20 ):
    """Project Euler 5: smallest positive number evenly divisible by 1..n."""
    g = 1
    for i in range(1, n + 1 ):
        g = lcm(g, i )
    return g


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 88 |
import string

# Fixes vs the obfuscated original: the functions were all named ``a__``
# although they call each other as ``get_letter_count``/``get_frequency_order``
# and sort with an explicit key function; locals (``letter_count``,
# ``match_score`` ...) were read but never bound; the ``reverse=`` arguments
# referenced the undefined ``A_`` and are restored to True.

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def get_letter_count ( message ):
    """Count occurrences of each uppercase letter in ``message``."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero ( x ):
    """Sort key: the first element of a pair."""
    return x[0]


def get_frequency_order ( message ):
    """Return the 26 letters ordered from most to least frequent in
    ``message`` (ties broken by reverse-ETAOIN order)."""
    letter_to_freq = get_letter_count(message )
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True )
        freq_to_letter_str[freq] = """""".join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True )
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )


def english_freq_match_score ( message ):
    """Score 0-12: how closely the letter frequencies of ``message`` match
    typical English (6 most + 6 least common letters)."""
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 1 |
from functools import lru_cache

# Fixes vs the obfuscated original: every function was named ``a__``
# although the bodies call ``unique_prime_factors``/``upf_len``/``equality``/
# ``run``/``solution``; loop/accumulator locals (``i``, ``factors``,
# ``group``, ``checker``, ``base`` ...) were read but never bound.


def unique_prime_factors ( n ):
    """Return the set of distinct prime factors of ``n`` (trial division)."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors


@lru_cache
def upf_len ( num ):
    """Memoized count of distinct prime factors of ``num``."""
    return len(unique_prime_factors(num ) )


def equality ( iterable ):
    """True when all elements of ``iterable`` are equal (or it is empty)."""
    return len(set(iterable ) ) in (0, 1)


def run ( n ):
    """Find the first run of ``n`` consecutive integers each having exactly
    ``n`` distinct prime factors (Project Euler 47)."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1


def solution ( n = 4 ):
    """Return the first member of the qualifying run, or None."""
    results = run(n )
    return results[0] if len(results ) else None


if __name__ == "__main__":
    print(solution())
| 88 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Fixes vs the obfuscated original: the list was bound to a throwaway name
# although the parameter builder iterates ``DATASETS_ON_HF_GCP``; the builder
# itself was named ``a__`` although the class decorator calls
# ``list_datasets_on_hf_gcp_parameters``; the pytest functions shadowed each
# other under the name ``a__`` (never collected — pytest needs ``test_*``)
# and renamed their fixture parameters to ``A_`` (breaking fixture
# injection); locals read but never bound are restored.
DATASETS_ON_HF_GCP = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]


def list_datasets_on_hf_gcp_parameters ( with_config=True ):
    """Build ``parameterized.named_parameters`` entries for the datasets above."""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCAmelCase_ ( _A ):
    """Check each dataset's info file is reachable on the HF GCS mirror."""

    dataset = None
    config_name = None

    def test_dataset_info_available( self : List[Any] , dataset : int , config_name : Union[str, Any] ) -> Tuple:
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            # NOTE(review): keyword values below (dataset=True, with_hash=False)
            # were lost in obfuscation and restored from the upstream test —
            # confirm against datasets' test_hf_gcp.py.
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )


@pytest.mark.integration
def test_as_dataset_from_hf_gcs ( tmp_path_factory ):
    """Download-and-prepare wikipedia 20220301.frr from the HF GCS mirror."""
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name="""20220301.frr""", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs ( tmp_path ):
    """Stream wikipedia 20220301.frr without materializing it."""
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path, dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name="""20220301.frr""", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""], IterableDataset )
    assert next(iter(ds["""train"""] ) )
| 88 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict ( config, input_ids, attention_mask=None, head_mask=None ):
    """Build the kwargs dict for TFOPT models, deriving the attention mask
    from ``config.pad_token_id`` when none is supplied.

    Fixes vs the obfuscated original: all parameters were named ``A_``
    (a SyntaxError) and the function was unreachable under the name the
    model tester calls (``prepare_opt_inputs_dict``); the non-existent dtype
    ``tf.inta`` is restored to ``tf.int8``.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id ), tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class UpperCAmelCase_ :
    """Generates tiny TFOPT configs/inputs and checks decoder past-kv behaviour.

    Fixes vs the obfuscated original: ``__init__`` declared every parameter
    with the same name (a SyntaxError) and bound values to throwaway locals
    instead of the ``self.*`` attributes read below; the class attributes all
    shared the name ``a__`` although the code reads ``self.config_cls`` and
    ``self.config_updates``; method names are restored to the ones the test
    class invokes.
    """

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = """gelu"""

    def __init__(
        self : Dict ,
        parent : Optional[int] ,
        batch_size : List[Any]=13 ,
        seq_length : List[str]=7 ,
        is_training : Optional[int]=True ,
        use_labels : Any=False ,
        vocab_size : Tuple=99 ,
        hidden_size : str=16 ,
        num_hidden_layers : Optional[Any]=2 ,
        num_attention_heads : List[Any]=4 ,
        intermediate_size : int=4 ,
        hidden_act : Optional[int]="gelu" ,
        hidden_dropout_prob : Optional[Any]=0.1 ,
        attention_probs_dropout_prob : str=0.1 ,
        max_position_embeddings : List[str]=20 ,
        eos_token_id : int=2 ,
        pad_token_id : str=1 ,
        bos_token_id : Optional[int]=0 ,
        embed_dim : List[Any]=16 ,
        word_embed_proj_dim : str=16 ,
    ) -> None:
        # Defaults above follow the obfuscated signature's default order,
        # matched positionally to the attribute-assignment order below.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common( self : int ) -> Optional[Any]:
        """Return a tiny (config, inputs_dict) pair with an EOS-terminated batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self : Dict , config : Dict , inputs_dict : List[Any] ) -> None:
        """Verify cached (past_key_values) decoding matches full-sequence decoding."""
        model = TFOPTModel(config=config )
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
@require_tf
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    """Common TFOPT model tests: config sanity, past-kv, embedding resize.

    Fixes vs the obfuscated original: the class attributes all shared the
    name ``a__`` although the tests read ``self.all_model_classes``; the
    inner helper declared two parameters with one name (a SyntaxError);
    ``setUp`` bound the testers to locals although the tests read
    ``self.model_tester``/``self.config_tester``; duplicate-named loop
    variables and unbound locals are restored.
    """

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp( self : str ) -> None:
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )

    def test_config( self : List[str] ) -> None:
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self : Any ) -> None:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )

    def test_resize_token_embeddings( self : List[Any] ) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model : Any , embedding_layer : str ):
            # Return the layer's weight, building the model first if needed.
            if hasattr(embedding_layer , """weight""" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , """weight""" ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor ( tok_lst ):
    """Wrap a Python list of token ids in an int32 TF constant.

    Fixes vs the obfuscated original: the function was named ``a__`` although
    the integration test calls ``_long_tensor``, and it used the non-existent
    dtype ``tf.intaa`` (restored to ``tf.int32``).
    """
    return tf.constant(tok_lst, dtype=tf.int32 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Builds a tiny OPT config plus inputs for head-level tests.

    Fixes vs the obfuscated original: the class attribute was named ``a__``
    although the method reads ``self.vocab_size``; ``eos_column_vector`` and
    ``input_ids`` were read but never bound; the non-existent dtype
    ``tf.intaa`` is restored to ``tf.int32``.
    """

    vocab_size = 99

    def _lowercase ( self : str ) -> Optional[Any]:
        """Return (config, input_ids, batch_size) for a tiny OPT model."""
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: OPT-350m hidden states match reference values.

    Fixes vs the obfuscated original: results were bound to a throwaway local
    while later lines read undefined names (``input_ids``, ``output``,
    ``xla_generate`` ...); ``jit_compile`` referenced an undefined name and
    is restored to True; the method gets a ``test_`` name so unittest
    discovers it.
    """

    @slow
    def test_inference_no_head( self : Optional[Any] ) -> None:
        model = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
        input_ids = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-3 ) )
        # Compile with XLA and compare against the same reference slice.
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4E-2 ) )
@require_tf
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
    """Checks OPT-350m mean logits against Metaseq reference values (eager and XLA).

    Fixes vs the obfuscated original: ``setUp`` bound the model path to a
    local although the test reads ``self.path_model``; locals (``model``,
    ``tokenizer``, ``inputs``, ``logits`` ...) were read but never bound;
    ``padding``/``add_special_tokens``/``jit_compile`` referenced undefined
    names — ``add_special_tokens=False`` is grounded by the in-code comment;
    the test method gets a ``test_`` name so unittest discovers it.
    """

    def setUp( self : Any ) -> None:
        super().setUp()
        self.path_model = """facebook/opt-350m"""

    def test_logits( self : str ) -> None:
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model )
        prompts = [
            """Today is a beautiful day and I want to""",
            """In the city of""",
            """Paris is the capital of France and""",
            """Computers and mobile phones have taken""",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="""tf""" , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4 ) )
        # Same check through an XLA-compiled forward pass.
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1E-4 ) )
@require_tf
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow generation tests for TF OPT (restored local names).

    NOTE(review): the three test methods below all carry the mangled name
    `_lowercase`, so only the last survives class creation; they need
    distinct `test_*` names to actually run under unittest.
    """

    @property
    def prompts( self ):
        """Shared prompt set (name restored: the methods read ``self.prompts``)."""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def _lowercase ( self ):
        """Greedy generation with facebook/opt-125m matches reference strings."""
        model_id = """facebook/opt-125m"""
        expected_outputs = [
            """Today is a beautiful day and I want to""",
            """In the city of New York, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , expected_outputs )

    def _lowercase ( self ):
        """Left-padded batched generation agrees with per-sentence generation."""
        model_id = """facebook/opt-350m"""
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = """left"""
        # use different length sentences to test batching
        sentences = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True )
        input_ids = inputs["""input_ids"""]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        # number of pad positions in the shorter row; dtype restored from the
        # nonexistent `tf.intaa` — assumed int64 as upstream, TODO confirm
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["""attention_mask"""][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            """Hello, my dog is a little bit of a dork.\nI'm a little bit""",
            """Today, I was in the middle of a conversation with a friend about the""",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )

    def _lowercase ( self ):
        """Greedy generation with facebook/opt-350m matches reference strings."""
        model_id = """facebook/opt-350m"""
        expected_outputs = [
            """Today is a beautiful day and I want to""",
            """In the city of San Francisco, the city""",
            """Paris is the capital of France and the capital""",
            """Computers and mobile phones have taken over the""",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="""tf""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , expected_outputs )
| 88 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Regression test: an optimizer wrapped by ``Accelerator.prepare`` stays picklable."""

    def _lowercase ( self ):
        """Prepare an SGD optimizer with Accelerate and round-trip it through pickle.

        Fix: the mangled original bound every value to one throwaway local and
        then referenced undefined names (`model`, `UpperCamelCase__`).
        """
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # reset the Accelerate global singleton so later tests start clean
        AcceleratorState._reset_state()
| 88 | 1 |
import socket
def a__ ( ):
    """Connect to a local server, send a greeting, and save the reply to a file.

    Fix: the mangled original assigned the socket/host/port to one throwaway
    local and then referenced the undefined names `sock`, `host`, `port`,
    and `data`.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312  # must match the port the companion server listens on
    sock.connect((host, port) )
    sock.send(b"""Hello server!""" )
    with open("""Received_file""", """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )
if __name__ == "__main__":
    # Fix: the entry point was renamed to `a__` by the mangler; `main` is
    # undefined in this module.
    a__()
| 88 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Sentinel strings marking the end of a generated function body; generation
# stops once one of these appears after the prompt (mangled stand-in for the
# original `EOF_STRINGS` constant).
__lowerCAmelCase : Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class UpperCAmelCase_ ( _A ):
    """Iterable dataset that tokenizes every HumanEval prompt and yields each
    one ``n_copies`` times.

    Fix: the mangled ``__init__`` repeated one parameter name four times
    (a SyntaxError) and stored nothing on ``self``; names restored from the
    attributes read in ``__iter__``.
    """

    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        # default to evaluating every task in the dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self ):
        """Yield dicts of tokenized prompt ids, task id, and prompt length."""
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="""pt""" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class UpperCAmelCase_ ( _A ):
    """StoppingCriteria that fires once every generated sequence contains one
    of the EOF sentinel strings after the prompt.

    Fix: the mangled ``__init__`` repeated one parameter name (SyntaxError)
    and stored nothing; names restored from the attributes read in
    ``__call__``.
    """

    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        """Return True when all generations past ``start_length`` hit an EOF string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def a__ ( A_, stop_sequences=None ):
    """Strip the trailing, incomplete block from generated code.

    A_: generated source text.
    stop_sequences: optional list of stop strings; defaults to the module's
        EOF sentinel list (mangled name ``__lowerCAmelCase``).

    Fix: the original joined the *input string's characters* as alternatives
    and returned an undefined ``string_list``.  ``re.split`` with a capturing
    group keeps the separators, so dropping the last two chunks removes the
    final stop marker and whatever followed it.
    """
    if stop_sequences is None:
        stop_sequences = __lowerCAmelCase  # module-level EOF sentinels
    string_list = re.split("""(%s)""" % """|""".join(stop_sequences ), A_ )
    # last string should be ""
    return "".join(string_list[:-2] )
def a__ ( accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs ):
    """Run generation over ``dataloader`` and group decoded completions per task.

    Returns a list of ``n_tasks`` lists of generated code strings.

    Fix: the mangled def repeated one parameter name six times (a
    SyntaxError); parameter/local names restored from the surviving
    references in the body.
    """
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            # let the stopping criteria know where the prompt ends
            gen_kwargs["""stopping_criteria"""][0].start_length = batch["""ids"""].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["""ids"""][:, : batch["""input_len"""]], num_return_sequences=batch_size, **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["""task_id"""].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True )
            # NOTE(review): `remove_last_block` was renamed (and shadowed) by
            # the mangler; this reference will not resolve as written.
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def a__ ( ):
    """Generate HumanEval completions with an accelerated causal LM and report pass@k.

    Fix: the mangled original assigned everything to one throwaway local,
    leaving every later reference (`args`, `accelerator`, `tokenizer`, ...)
    a NameError; names restored from the surviving uses in the body.
    """
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = """false"""
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings; the EOF sentinel list lives in the mangled module
    # constant `__lowerCAmelCase` defined near the imports
    gen_kwargs = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, __lowerCAmelCase, tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("""openai_humaneval""" )
    code_eval_metric = load_metric("""code_eval""" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
    n_copies = args.n_samples // args.batch_size
    # NOTE(review): `TokenizedDataset`, `EndOfFunctionCriteria` and
    # `complete_code` were renamed by the mangler; these references will not
    # resolve until those definitions are restored.
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["""test"""], n_copies=n_copies, n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]] )
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""" )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader )
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["""test"""][task]["""test"""]
            entry_point = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append("""\n""" + test_func + """\n""" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file, """w""" ) as fp:
            json.dump(pass_at_k, fp )


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # the entry point was renamed to `a__` by the mangler; `main` is undefined
    a__()
| 88 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , """Tatoeba directory does not exist.""" )
class UpperCAmelCase_ ( unittest.TestCase ):
    """Smoke tests for the Tatoeba -> Marian conversion utilities.

    Fixes: the skip condition referenced an undefined `_A` (restored to the
    imported `DEFAULT_REPO`), the converter property referenced an undefined
    `UpperCamelCase__`, and the property was renamed so `self.resolver`
    (used by both tests) resolves.  NOTE(review): the two test methods still
    share the mangled name `_lowercase`, so only the last survives.
    """

    @cached_property
    def resolver( self ):
        """Converter writing into a throwaway temp directory."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )

    @slow
    def _lowercase ( self ):
        """Conversion of a single language pair should run end to end."""
        self.resolver.convert_models(["""heb-eng"""] )

    @slow
    def _lowercase ( self ):
        """Model-card metadata should record the language pair."""
        model_card, mmeta = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 88 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args ( A_=None ):
    """Build and parse the TPU-launcher command line.

    A_: optional explicit argument list (defaults to ``sys.argv[1:]``), which
    makes the parser testable without touching the process argv.

    Fixes: the original referenced an undefined ``A_`` for every ``type=``/
    ``nargs=`` value (restored to ``int``/``str``/``REMAINDER``), and the name
    is restored because ``main`` below calls ``parse_args()``.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER )
    return parser.parse_args(A_ )


def a__ ( ):
    """Spawn the training script across TPU cores via ``xmp.spawn``.

    Fix: local names restored (`args`, `script_fpath`, `mod`) and the
    ``sys.argv`` patch re-established so the spawned script sees its own
    arguments plus ``--tpu_num_cores``.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script parses its own arguments
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )


if __name__ == "__main__":
    # the entry point was renamed to `a__` by the mangler; `main` is undefined
    a__()
| 88 | 1 |
def a__ ( A_ ):
    """Sort the mutable sequence ``A_`` in place via selection sort and return it.

    O(n^2) comparisons; handles empty and single-element sequences.

    Fixes: the mangled original read the undefined names ``length`` and
    ``collection``, passed the sequence itself to ``range``, and assigned both
    halves of the swap to one throwaway local so nothing was ever swapped.
    """
    length = len(A_ )
    for i in range(length - 1 ):
        # locate the minimum of the unsorted suffix
        least = i
        for k in range(i + 1, length ):
            if A_[k] < A_[least]:
                least = k
        if least != i:
            # swap the found minimum into position i
            A_[least], A_[i] = (A_[i], A_[least])
    return A_
if __name__ == "__main__":
    # Fix: the mangled original assigned to throwaway names and then read the
    # undefined `user_input`, `unsorted`, and `selection_sort` (the sort
    # function was renamed to `a__`).
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(a__(unsorted))
| 88 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (mangled name; presumably `logger` upstream — TODO confirm).
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint names to config URLs.  NOTE(review): this
# assignment reuses the same mangled name as the logger above and clobbers it.
__lowerCAmelCase : Tuple = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _A ):
    """PEGASUS model configuration (restored from the mangled source).

    Fixes: the mangled ``__init__`` repeated one parameter name 23 times (a
    SyntaxError) and stored every value in one throwaway local, so the config
    held no attributes and its properties raised; the three class attributes
    also shared one name.  Names follow ``transformers.PegasusConfig``.
    """

    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    # backward-compatible alias for the last surviving mangled attribute
    a__ = attribute_map

    def __init__( self , vocab_size=5_0265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        """Store the PEGASUS hyper-parameters; defaults match the upstream config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )

    @property
    def num_attention_heads( self ) -> int:
        """Alias exposed via ``attribute_map`` (restored property name)."""
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        """Alias exposed via ``attribute_map`` (restored property name)."""
        return self.d_model
| 88 | 1 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature ( A_ ):
    """Return the canonical sorted-letter signature of a word."""
    return "".join(sorted(A_ ) )


def anagram ( A_ ):
    """Return every dictionary word sharing ``A_``'s letter signature."""
    return word_by_signature[signature(A_ )]


# Backward-compatible alias for the last surviving mangled definition name.
a__ = anagram


# Fixes: the mangled module body assigned to throwaway names and then read
# the undefined `data`, `word_list`, `word_by_signature`, `signature`,
# `anagram`, and `all_anagrams`; self-consistent names restored.
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
| 88 |
import re
import string
import numpy as np
import datasets
# Metric documentation strings consumed by `datasets`.  Fix: the mangled
# source assigned all three to one name, clobbering each other and leaving
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` (used by the metric
# class below) undefined.
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions that equal their reference.

    Fixes: the compute method repeated one parameter name six times (a
    SyntaxError) and rebound results to one throwaway local; the two methods
    are renamed to ``_info``/``_compute``, the hook names the ``datasets``
    framework actually invokes.
    """

    def _info( self ):
        """Metric metadata consumed by `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , reference_urls=[] , )

    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """Return {'exact_match': rate in [0, 100]} after the requested normalizations."""
        if regexes_to_ignore is not None:
            # strip every ignored regex before any case/punctuation handling
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 88 | 1 |
from math import factorial
def a__ ( n, k ):
    """Return the binomial coefficient C(n, k).

    Raises ValueError when ``n < k`` or ``k < 0``.

    Fix: the mangled def repeated one parameter name (a SyntaxError) while
    the body read ``n`` and ``k``; names restored from the body.
    """
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # Fix: the combinations function was renamed to `a__` by the mangler;
    # `combinations` is undefined in this module.
    print(
        'The number of five-card hands possible from a standard',
        F'''fifty-two card deck is: {a__(52, 5)}\n''',
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        F'''4 for group projects, there are {a__(40, 4)} ways''',
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        F'''are {a__(10, 3)} ways that first, second and''',
        'third place can be awarded.',
    )
| 88 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( A_ ):
    """Delete fairseq-specific keys from the state dict ``A_`` in place.

    Missing keys are ignored (``pop`` with a ``None`` default).

    Fix: the mangled original iterated the undefined ``ignore_keys`` and
    called ``state_dict.pop(A_, A_)``, popping the (unhashable) dict itself.
    """
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        A_.pop(k, None )
def a__ ( A_ ):
    """Build a bias-free ``nn.Linear`` that shares the weights of embedding ``A_``.

    The returned layer's ``weight.data`` is the embedding's weight tensor
    (shared, not copied) — the usual tied output-projection construction.

    Fix: the mangled original read the undefined ``emb``/``lin_layer`` and
    passed the embedding itself as every ``nn.Linear`` argument.
    """
    vocab_size, emb_size = A_.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = A_.weight.data
    return lin_layer
def a__ ( A_ ):
    """Load a fairseq XGLM checkpoint from path ``A_`` and convert it to a
    Hugging Face ``XGLMForCausalLM``.

    Fix: the mangled original bound every intermediate to one throwaway local
    and then read the undefined ``checkpoint``/``args``/``state_dict``/
    ``vocab_size``/``model``.
    """
    checkpoint = torch.load(A_, map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    # NOTE(review): `remove_ignore_keys_` and `make_linear_from_emb` were
    # renamed to `a__` (and shadowed) by the mangler; these references will
    # not resolve as written.
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config )
    # non-strict load: report (rather than fail on) any unexpected keys
    missing_keys = model.load_state_dict(state_dict, strict=False )
    print(missing_keys )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # Fix: the mangled original assigned to throwaway names and then read the
    # undefined `parser`, `args`, `model`, and
    # `convert_fairseq_xglm_checkpoint_from_disk` (renamed to `a__`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = a__(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 88 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCAmelCase : Any = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def a__ ( height, width, scale_factor=8 ):
    """Return ``(height, width)`` snapped to the model's resolution grid.

    Each dimension is divided by ``scale_factor**2`` (rounding up) and then
    multiplied back by ``scale_factor`` — the latent-space sizing used by the
    movq decoder.

    Fix: the mangled def repeated one parameter name three times (a
    SyntaxError) while the body read ``height``/``width``/``scale_factor``;
    names restored from the body.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def a__ ( pil_image, w=512, h=512 ):
    """Resize a PIL image and convert it to a ``[-1, 1]`` float tensor.

    Returns a ``(1, 3, h, w)`` torch tensor (RGB, channels-first, batch dim).

    Fixes: the mangled def repeated one parameter name (a SyntaxError), the
    body read undefined locals, and ``np.floataa`` does not exist (restored
    to ``np.float32``).
    """
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    # map uint8 [0, 255] to float [-1, 1]
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : DDPMScheduler , UpperCamelCase__ : VQModel , ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
__magic_name__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
__magic_name__ = min(int(num_inference_steps * strength ) , UpperCamelCase__ )
__magic_name__ = max(num_inference_steps - init_timestep , 0 )
__magic_name__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(UpperCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase__ )}''' )
__magic_name__ = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
__magic_name__ = batch_size * num_images_per_prompt
if image.shape[1] == 4:
__magic_name__ = image
else:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(UpperCamelCase__ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__magic_name__ = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase__ )
]
__magic_name__ = torch.cat(UpperCamelCase__ , dim=0 )
else:
__magic_name__ = self.movq.encode(UpperCamelCase__ ).latent_dist.sample(UpperCamelCase__ )
__magic_name__ = self.movq.config.scaling_factor * init_latents
__magic_name__ = torch.cat([init_latents] , dim=0 )
__magic_name__ = init_latents.shape
__magic_name__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
# get latents
__magic_name__ = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = init_latents
return latents
    def _lowercase ( self : Tuple , UpperCamelCase__ : Tuple=0 ) -> List[Any]:
        """Offload all sub-models with accelerate's ``cpu_offload`` to reduce GPU memory.

        The parameter is presumably the GPU id (default 0) — the mangled body
        reads ``gpu_id`` and ``models``, which the ``__magic_name__`` rewrites
        no longer bind; verify against the upstream pipeline.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        __magic_name__ = torch.device(F'''cuda:{gpu_id}''' )
        # Offload both the UNet and the MoVQ VAE; None entries are skipped.
        __magic_name__ = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
    def _lowercase ( self : Tuple , UpperCamelCase__ : Any=0 ) -> str:
        """Offload models to CPU using hooks (``cpu_offload_with_hook``), keeping
        transfer-back fast when a model is next used.

        NOTE(review): mangled — ``gpu_id`` and ``hook`` are read but never bound
        because assignments go to ``__magic_name__``; verify against upstream.
        """
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        __magic_name__ = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase__ )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        # Chain the hooks: each model's hook knows about the previous one.
        __magic_name__ = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __magic_name__ , __magic_name__ = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
        # We'll offload the last model manually.
        __magic_name__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self : List[str] ) -> List[str]:
"""simple docstring"""
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(UpperCamelCase__ )
    # NOTE(review): the decorator argument was presumably EXAMPLE_DOC_STRING;
    # ``UpperCamelCase__`` is undefined at class scope — verify against upstream.
    def __call__( self : Optional[int] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 100 , UpperCamelCase__ : float = 4.0 , UpperCamelCase__ : float = 0.3 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ) -> int:
        """Run Kandinsky image-to-image generation.

        NOTE(review): machine-mangled like the rest of this class — the signature
        repeats ``UpperCamelCase__`` (SyntaxError) and assignments go to a
        throwaway ``__magic_name__`` while later lines read the original names
        (``image_embeds``, ``latents``, ``noise_pred``, ...). The comments below
        describe the intended flow only.
        """
        __magic_name__ = self._execution_device
        # Classifier-free guidance is active only for guidance_scale > 1.
        __magic_name__ = guidance_scale > 1.0
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            __magic_name__ = torch.cat(UpperCamelCase__ , dim=0 )
        __magic_name__ = image_embeds.shape[0]
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            __magic_name__ = torch.cat(UpperCamelCase__ , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeds per requested image and stack [negative, positive].
            __magic_name__ = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
            __magic_name__ = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
            __magic_name__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
        if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            __magic_name__ = [image]
        if not all(isinstance(UpperCamelCase__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F'''Input is in incorrect format: {[type(UpperCamelCase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor''' )
        # Preprocess and encode the conditioning image into MoVQ latents.
        __magic_name__ = torch.cat([prepare_image(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for i in image] , dim=0 )
        __magic_name__ = image.to(dtype=image_embeds.dtype , device=UpperCamelCase__ )
        __magic_name__ = self.movq.encode(UpperCamelCase__ )["""latents"""]
        __magic_name__ = latents.repeat_interleave(UpperCamelCase__ , dim=0 )
        self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
        __magic_name__ , __magic_name__ = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        __magic_name__ , __magic_name__ = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
        __magic_name__ = self.prepare_latents(
            UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ )
        # Denoising loop.
        for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
            # expand the latents if we are doing classifier free guidance
            __magic_name__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __magic_name__ = {"""image_embeds""": image_embeds}
            __magic_name__ = self.unet(
                sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
            if do_classifier_free_guidance:
                # Split predicted noise from predicted variance, blend the two
                # guidance branches, then re-attach the text-branch variance.
                __magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 )
                __magic_name__ , __magic_name__ = noise_pred.chunk(2 )
                __magic_name__ , __magic_name__ = variance_pred.chunk(2 )
                __magic_name__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __magic_name__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the variance channels — drop them.
                __magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __magic_name__ = self.scheduler.step(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0]
        # post-processing
        __magic_name__ = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] to [0, 1] and move to HWC numpy layout.
            __magic_name__ = image * 0.5 + 0.5
            __magic_name__ = image.clamp(0 , 1 )
            __magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __magic_name__ = self.numpy_to_pil(UpperCamelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=UpperCamelCase__ )
| 88 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Find the fork point against main so only files touched on this branch are listed.
# Fix: the original assigned every result to `__lowerCAmelCase` while the later
# f-strings/comprehension referenced the real names -> NameError at runtime.
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
# Files modified since the fork point; deleted files are excluded (--diff-filter=d).
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
# Restrict to .py files under the top-level directories passed as CLI arguments.
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed directly by Makefile commands.
print(' '.join(relevant_modified_files), end='')
| 88 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def a__ ( key, default=False ):
    '''Read boolean env var *key*; return its strtobool value, or *default* if unset.

    Fix: the original signature repeated the parameter name ``A_`` (a
    SyntaxError) while the body read ``key`` and ``default``; names restored.
    Raises ValueError if the variable is set to a non-boolean string.
    '''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
# Test-selection flags read from the environment (RUN_SLOW, RUN_REMOTE, ...).
# NOTE(review): the obfuscation renamed the flag-parsing function to `a__` and
# every assignment target to `__lowerCAmelCase`, so `parse_flag_from_env` and
# the `_run_*_tests` names used by the decorators below do not resolve in this
# file as-is — verify against upstream datasets tests/utils.py.
__lowerCAmelCase : Tuple = parse_flag_from_env('RUN_SLOW', default=False)
__lowerCAmelCase : str = parse_flag_from_env('RUN_REMOTE', default=False)
__lowerCAmelCase : List[Any] = parse_flag_from_env('RUN_LOCAL', default=True)
__lowerCAmelCase : List[str] = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio: needs soundfile >= 0.12 (which bundles libsndfile on Windows/macOS).
__lowerCAmelCase : Tuple = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
    reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam: apache-beam is incompatible with newer dill releases.
__lowerCAmelCase : Union[str, Any] = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
    reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
__lowerCAmelCase : Dict = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('0.3.2'),
    reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
__lowerCAmelCase : Optional[Any] = pytest.mark.skipif(
    sys.platform == 'win32',
    reason='test should not be run on Windows',
)
def a__ ( test_case ):
    '''Decorator: skip *test_case* when faiss is not installed.

    Fix: the original assigned the skip-wrapped function to a discarded local
    and then returned the undefined name ``test_case`` -> NameError.
    '''
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when the `regex` package is not installed.

    Fix: returned the undefined name ``test_case`` (NameError) — the skip
    result is now bound to the parameter it wraps.
    '''
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when elasticsearch is not installed.

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when sqlalchemy is not installed.

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless PyTorch is available (per datasets `config`).

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless TensorFlow is available (per datasets `config`).

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless JAX is available (per datasets `config`).

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless Pillow is available (per datasets `config`).

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when transformers is not importable.

    Fix: the success branch returned the undefined name ``test_case``
    instead of the parameter (NameError).
    '''
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("""test requires transformers""" )(test_case )
    else:
        return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when tiktoken is not importable.

    Fix: success branch returned undefined ``test_case`` (NameError).
    '''
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""" )(test_case )
    else:
        return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when spacy is not importable.

    Fix: success branch returned undefined ``test_case`` (NameError).
    '''
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("""test requires spacy""" )(test_case )
    else:
        return test_case
def a__ ( model ):
    '''Decorator factory: skip a test unless spacy AND the given spacy *model* load.

    Fix: the inner parameter shadowed the outer one (both were ``A_``), so
    ``spacy.load`` was called on the test function and the skip message
    formatted it; the success branch also returned an undefined name.
    '''
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401

            spacy.load(model )
        except ImportError:
            return unittest.skip("""test requires spacy""" )(test_case )
        except OSError:
            return unittest.skip("""test requires spacy model '{}'""".format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def a__ ( test_case ):
    '''Decorator: skip *test_case* when pyspark is not importable.

    Fix: success branch returned undefined ``test_case`` (NameError).
    '''
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""" )(test_case )
    else:
        return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* when joblibspark is not importable.

    Fix: success branch returned undefined ``test_case`` (NameError).
    '''
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""" )(test_case )
    else:
        return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless the RUN_SLOW flag is set.

    Fix: returned the undefined name ``test_case`` (NameError).
    NOTE(review): ``_run_slow_tests`` must be defined at module level — in this
    mangled file the flag globals were renamed; verify.
    '''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless the RUN_LOCAL flag is set.

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless the RUN_PACKAGED flag is set.

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""" )(test_case )
    return test_case
def a__ ( test_case ):
    '''Decorator: skip *test_case* unless the RUN_REMOTE flag is set.

    Fix: returned the undefined name ``test_case`` (NameError).
    '''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""" )(test_case )
    return test_case
def a__ ( *A_ ):
'''simple docstring'''
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(A_ ) and name.startswith("""test""" ):
for decorator in decorators:
__magic_name__ = decorator(A_ )
setattr(cls, A_, A_ )
return cls
return decorate
class UpperCAmelCase_ ( _A ):
    '''Raised when an offline-mode request would hang forever because no timeout was set.

    NOTE(review): the base class ``_A`` is not defined in this chunk —
    presumably an exception base; verify against the imports above.
    '''
    pass
class UpperCAmelCase_ ( _A ):
    '''Offline-simulation modes used by the offline context manager in this module.

    NOTE(review): the three members were presumably CONNECTION_FAILS,
    CONNECTION_TIMES_OUT and HF_DATASETS_OFFLINE_SET_TO_1 on an enum base;
    the obfuscation collapsed all member names to ``a__`` (only the last
    assignment survives) — restore from upstream before use.
    '''
    # 0: every request raises ConnectionError
    a__ = 0
    # 1: requests are routed to an unroutable IP and time out
    a__ = 1
    # 2: datasets.config.HF_DATASETS_OFFLINE is patched to 1
    a__ = 2
@contextmanager
def a__ ( mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16 ):
    '''Simulate an offline environment inside the ``with`` block.

    mode selects the failure style (connection error, timeout against an
    unroutable IP, or patching datasets.config.HF_DATASETS_OFFLINE); *timeout*
    is the forced request timeout used by CONNECTION_TIMES_OUT.

    Fix: every parameter of the outer and inner functions was named ``A_``
    (duplicate-argument SyntaxError); names restored from body usage.
    '''
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method, invalid_url, **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""", f'''OfflineMock[{url}]''' ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs ):
        raise requests.ConnectionError("""Offline mode is enabled.""", request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""", raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""", timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""", True ):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def a__ ( *args, **kwargs ):
    '''Context manager: chdir into a fresh temporary directory for the block,
    then restore the previous working directory (the temp dir is deleted).

    Fix: ``*A_, **A_`` duplicated the argument name (SyntaxError) and both
    ``os.chdir`` calls targeted a discarded local; restored per upstream.
    '''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args, **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def a__ ( ):
    '''Assert that pyarrow's allocated memory grows across the ``with`` block.

    NOTE(review): the baseline is assigned to a discarded local while the
    final assert reads ``previous_allocated_memory`` — mangled; verify.
    '''
    import gc

    gc.collect()
    # Snapshot current Arrow allocation before running the block.
    __magic_name__ = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def a__ ( ):
    '''Assert that pyarrow's allocated memory does NOT grow across the block.

    NOTE(review): same mangling as the companion context manager above —
    the baseline assignment target was renamed away; verify.
    '''
    import gc

    gc.collect()
    # Snapshot current Arrow allocation before running the block.
    __magic_name__ = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def a__ ( rng1, rng2 ):
    '''Return True if two NumPy bit generators are in the same state, probed by
    comparing their next ten draws on deep copies (the originals are untouched).

    Fix: both parameters were named ``A_`` (duplicate-argument SyntaxError).
    '''
    return deepcopy(rng1 ).integers(0, 100, 10 ).tolist() == deepcopy(rng2 ).integers(0, 100, 10 ).tolist()
def a__ ( func ):
    '''Decorator: xfail the test when the server answers HTTP 500/502;
    re-raise any other HTTPError.

    Fix: the inner wrapper was declared ``_wrapper(A_, *A_, **A_)`` — a
    duplicate-argument SyntaxError — and the final call passed the wrong name.
    '''
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs ):
        try:
            return func(*args, **kwargs )
        except HTTPError as err:
            if str(err ).startswith("""500""" ) or str(err ).startswith("""502""" ):
                pytest.xfail(str(err ) )
            raise err

    return decorator.decorator(_wrapper, func )
class UpperCAmelCase_ :
    '''Result of a finished subprocess: exit code plus captured stdout/stderr lines.

    Fix: the original ``__init__`` repeated the parameter name (SyntaxError)
    and assigned to a discarded local instead of instance attributes.
    '''

    def __init__( self, returncode, stdout, stderr ):
        # exit status of the child process
        self.returncode = returncode
        # list of decoded stdout lines
        self.stdout = stdout
        # list of decoded stderr lines
        self.stderr = stderr
async def a__ ( stream, callback ):
    '''Forward each line read from *stream* (asyncio StreamReader-like) to
    *callback* until EOF (an empty read) is reached.

    Fix: both parameters were named ``A_`` (duplicate-argument SyntaxError).
    '''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def a__ ( cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False ):
    '''Run *cmd* as an asyncio subprocess, teeing stdout/stderr line by line
    into lists while optionally echoing them, and return a ``_RunOutput``.

    Fixes: the outer signature, the inner ``tee`` and both lambdas repeated
    parameter names (SyntaxError); the out/err sinks were never bound; and
    bare coroutines passed to ``asyncio.wait`` fail on Python 3.11+ — they
    are now wrapped in tasks.
    NOTE(review): ``_read_stream`` and ``_RunOutput`` are the upstream helper
    names; in this mangled file those definitions were renamed — verify.
    '''
    if echo:
        print("""\nRunning: """, """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    out = []
    err = []

    def tee(line, sink, pipe, label="" ):
        # Decode, strip the newline, store, and optionally echo with a label.
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label, line, file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.ensure_future(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="""stdout:""" ) ) ),
            asyncio.ensure_future(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="""stderr:""" ) ) ),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err )
def a__ ( cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True ):
    '''Synchronously run *cmd* through ``_stream_subprocess`` and return its
    result; raise RuntimeError on a nonzero exit code or on empty output.

    Fix: every parameter was named ``A_`` (duplicate-argument SyntaxError);
    locals restored from body usage (``result``, ``cmd_str``, ``stderr``).
    '''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
def a__ ( ):
    '''Return the numeric id of the current pytest-xdist worker
    (0 for "gw0" and for non-distributed runs).

    Fix: the original bound nothing (assignments went to a discarded local)
    and then read the undefined name ``A_`` -> NameError.
    '''
    worker = os.environ.get("""PYTEST_XDIST_WORKER""", """gw0""" )
    worker = re.sub(R"""^gw""", """""", worker, 0, re.M )
    return int(worker )
def a__ ( ):
    '''Return a port unique to this xdist worker for torch.distributed
    (base port 29500 plus the worker id).

    Fix: ``port`` and ``uniq_delta`` were never bound (assignments discarded).
    NOTE(review): ``pytest_xdist_worker_id`` is the upstream helper name; the
    obfuscation renamed that definition in this file — verify.
    '''
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 88 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
    '''Builds tiny ALBERT configs/random inputs and shape-checks each model head.

    NOTE(review): machine-mangled — every multi-argument signature repeats the
    parameter name ``UpperCamelCase__`` (a SyntaxError) and the
    ``__magic_name__ = ...`` lines discard their targets while later code
    reads ``self.<attr>`` / the original locals. The right-hand sides below
    preserve the intended attribute order; restore from upstream
    tests/models/albert/test_modeling_albert.py before use.
    '''
    def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=99 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=36 , UpperCamelCase__ : List[str]=6 , UpperCamelCase__ : List[str]=6 , UpperCamelCase__ : Union[str, Any]=6 , UpperCamelCase__ : int=37 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=16 , UpperCamelCase__ : int=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Dict=None , ) -> Any:
        """Store the (tiny) model hyper-parameters used by every check below."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = embedding_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_hidden_groups
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope
    def _lowercase ( self : Tuple ) -> Dict:
        """Create random ids/masks/labels plus a config for one forward pass."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
        __magic_name__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _lowercase ( self : Any ) -> List[Any]:
        """Return an AlbertConfig built from this tester's hyper-parameters."""
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def _lowercase ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ) -> Tuple:
        """Run AlbertModel with/without masks and check output shapes."""
        __magic_name__ = AlbertModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def _lowercase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> str:
        """Check AlbertForPreTraining output shapes (MLM logits + SOP logits)."""
        __magic_name__ = AlbertForPreTraining(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , sentence_order_label=UpperCamelCase__ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Dict:
        """Check AlbertForMaskedLM logits shape."""
        __magic_name__ = AlbertForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ) -> List[Any]:
        """Check AlbertForQuestionAnswering start/end logits shapes."""
        __magic_name__ = AlbertForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ) -> Tuple:
        """Check AlbertForSequenceClassification logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = AlbertForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _lowercase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ) -> int:
        """Check AlbertForTokenClassification logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = AlbertForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> List[Any]:
        """Check AlbertForMultipleChoice logits shape (inputs expanded per choice)."""
        __magic_name__ = self.num_choices
        __magic_name__ = AlbertForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # Expand each input to (batch, num_choices, seq_len).
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _lowercase ( self : int ) -> Optional[int]:
        """Repack prepare_config_and_inputs() into (config, inputs_dict) for common tests."""
        __magic_name__ = self.prepare_config_and_inputs()
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    '''Common + pipeline test suite for the ALBERT model family.

    NOTE(review): the bases ``_A , _A`` were presumably ModelTesterMixin and
    PipelineTesterMixin (imported above), and all three ``a__`` class
    attributes were distinct names upstream (all_model_classes,
    pipeline_model_mapping, fx_compatible or similar) — the obfuscation
    collapsed them so only the last assignment survives; verify.
    '''
    # All torch model classes exercised by the common tests.
    a__ = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    # Mapping from pipeline task name to the model class used for that task.
    a__ = (
        {
            """feature-extraction""": AlbertModel,
            """fill-mask""": AlbertForMaskedLM,
            """question-answering""": AlbertForQuestionAnswering,
            """text-classification""": AlbertForSequenceClassification,
            """token-classification""": AlbertForTokenClassification,
            """zero-shot""": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = True
    def _lowercase ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=False ) -> Union[str, Any]:
        """Add dummy labels for pretraining-mapped classes when return_labels is set.

        NOTE(review): signature repeats ``UpperCamelCase__`` (SyntaxError) and
        the label tensors are assigned to a discarded local — mangled; verify.
        """
        __magic_name__ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        if return_labels:
            if model_class in get_values(UpperCamelCase__ ):
                __magic_name__ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ )
                __magic_name__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
        return inputs_dict
    def _lowercase ( self : int ) -> int:
        """setUp: build the model tester and a ConfigTester for AlbertConfig."""
        __magic_name__ = AlbertModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    def _lowercase ( self : Optional[int] ) -> Optional[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def _lowercase ( self : Dict ) -> Dict:
        """Shape-check the base AlbertModel."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def _lowercase ( self : int ) -> List[str]:
        """Shape-check AlbertForPreTraining."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
    def _lowercase ( self : List[Any] ) -> Any:
        """Shape-check AlbertForMaskedLM."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
    def _lowercase ( self : Dict ) -> Tuple:
        """Shape-check AlbertForMultipleChoice."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
    def _lowercase ( self : Dict ) -> List[Any]:
        """Shape-check AlbertForQuestionAnswering."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
    def _lowercase ( self : Union[str, Any] ) -> Any:
        """Shape-check AlbertForSequenceClassification."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
    def _lowercase ( self : Tuple ) -> Optional[Any]:
        """Re-run the model check for every position-embedding type."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )
    @slow
    def _lowercase ( self : Optional[int] ) -> Optional[int]:
        """Smoke-test loading the first pretrained ALBERT checkpoint."""
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = AlbertModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Integration test: compare albert-base-v2 hidden states against golden values.'''
    @slow
    def _lowercase ( self : Dict ) -> Union[str, Any]:
        """Run a fixed input through albert-base-v2 and compare a 3x3 slice.

        NOTE(review): the model/inputs are assigned to a discarded
        ``__magic_name__`` while later lines read ``model``/``output`` —
        mangled; verify against upstream.
        """
        __magic_name__ = AlbertModel.from_pretrained("""albert-base-v2""" )
        __magic_name__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        __magic_name__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
        __magic_name__ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        # Golden values recorded from a known-good run of this checkpoint.
        __magic_name__ = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) )
| 88 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests ClapProcessor save/load round-trips and delegation to its parts.

    Fixes: the original never assigned ``self.checkpoint`` / ``self.tmpdirname``
    (though every method reads them), used undefined names in isinstance and
    kwargs positions, and named every method ``_lowercase`` so only the last
    definition survived. Names restored from the internal call sites.
    """

    def setUp(self):
        # Checkpoint used by the tokenizer / feature-extractor factories below.
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """A saved processor reloads with an equivalent tokenizer and extractor."""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained yields the fast tokenizer variant
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to from_pretrained reach both sub-components."""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        """Audio features produced via the processor match the extractor's."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        """Text encoded via the processor matches the tokenizer's output."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        """batch_decode is forwarded to the tokenizer unchanged."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """Processor input names (past the text ones) match the extractor's."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 88 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
# NOTE(review): both module constants share one obfuscated name, so the logger
# binding above is immediately overwritten by the archive map below — upstream
# these were two distinct names (logger and the pretrained config archive map).
__lowerCAmelCase : int = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class UpperCAmelCase_ ( _A ):
    """Model configuration for BioGPT; defaults match the microsoft/biogpt checkpoint."""

    # presumably `model_type` upstream — kept under the obfuscated name for compatibility
    a__ = """biogpt"""

    def __init__(
        self,
        vocab_size: int = 42384,
        hidden_size: int = 1024,
        num_hidden_layers: int = 24,
        num_attention_heads: int = 16,
        intermediate_size: int = 4096,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 1024,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-12,
        scale_embedding: bool = True,
        use_cache: bool = True,
        layerdrop: float = 0.0,
        activation_dropout: float = 0.0,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        **kwargs,
    ) -> None:
        # Fix: the original signature reused one parameter name for every
        # argument, which is a SyntaxError in Python. Names restored to match
        # the assignment order and the positional defaults below.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Token ids are handled by the base config class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 88 | 1 |
# Dependency table: maps a dependency's short name to its pip requirement
# specifier. Presumably auto-generated from setup.py upstream — TODO confirm
# before editing version pins by hand.
__lowerCAmelCase : Optional[int] = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
| 88 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
# Module-level logger used by the extractors below (e.g. tar path-safety errors).
__lowerCAmelCase : Any = get_logger(__name__)
class ExtractManager:
    """Extracts compressed archives into a content-addressed cache directory.

    Fixes: ``__init__`` bound its results to throwaway locals so
    ``self.extract_dir`` / ``self.extractor`` (read below) were never set; the
    helper methods had colliding names and duplicate parameter names. Method
    names restored from the call sites in ``extract``.
    """

    def __init__(self, cache_dir: Optional[str] = None):
        # Extracted archives live under the cache dir (or the global default path).
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when nothing usable exists at the target.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract ``input_path`` if it is a recognized archive; return the result path."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path  # not a recognized archive: hand the path back unchanged
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    """Abstract interface every archive extractor implements.

    Method names restored from the dispatch sites in ``Extractor``
    (``extractor.is_extractable`` / ``extractor.extract``).
    """

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base for extractors that detect their format via file magic numbers.

    Class/attribute names restored from the call sites
    (``MagicNumberBaseExtractor.read_magic_number`` and ``cls.magic_numbers``).
    """

    # Subclasses list the magic-number prefixes that identify their format.
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """Tar archive extractor with path-traversal protection on members."""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only members that extract inside ``output_path`` (blocks path traversal)."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # Fix: `exist_ok` was passed an undefined value in the obfuscated source.
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    """Gzip stream extractor (magic number 1F 8B)."""

    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    """Zip archive extractor with a stricter detection than ``zipfile.is_zipfile``."""

    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    """XZ (LZMA) stream extractor."""

    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    """RAR archive extractor; requires the optional ``rarfile`` package."""

    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        # Fix: `exist_ok` was passed an undefined value in the obfuscated source.
        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    """Zstandard stream extractor; requires the optional ``zstandard`` package."""

    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class BzipaExtractor(MagicNumberBaseExtractor):
    """Bzip2 stream extractor (class name kept as referenced by the extractors dict)."""

    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        # Local import fixes the mangled module-level `import bza` (no such module);
        # the standard-library module is `bz2`.
        import bz2

        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    """7-Zip archive extractor; requires the optional ``py7zr`` package."""

    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        # Fix: the module name was mangled to `pyazr`, which does not exist.
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class LzaExtractor(MagicNumberBaseExtractor):
    """LZ4 frame extractor; requires the optional ``lz4`` package.

    Class name kept as referenced by the extractors dict.
    """

    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        # Fix: the module name was mangled to `lza.frame`, which does not exist.
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    """Dispatches extraction to the extractor matching a path's format.

    Attribute/method names restored from the call sites in this file
    (``self.extractor = Extractor``, ``cls.extractors``, ``cls.infer_extractor_format``).
    """

    # Zip goes last: other formats can be misdetected as zip.
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        # Longest magic number among all magic-number-based extractors.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        """Return the extractors-dict key matching ``path``, or None if no format matches."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, BaseExtractor):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 88 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
__lowerCAmelCase : int = False
class UpperCAmelCase_ ( unittest.TestCase ):
    """Checks that one training loop behaves identically under DDPM and DDIM noise.

    Fixes: the factory method name is restored from its call site
    (``self.get_model_optimizer``); the final ``allclose`` calls compared
    undefined names — the DDPM-phase tensors are now kept under their own
    names so the last-iteration outputs of both phases can be compared.
    """

    def get_model_optimizer(self, resolution=32):
        """Build a small UNet and an SGD optimizer under a fixed seed."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the last step of each phase must produce (nearly) identical tensors
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1E-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1E-5))
| 88 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public names it exposes.
# NOTE(review): upstream this dict is named `_import_structure`; the obfuscated
# name means the `_import_structure` reference at the bottom is unresolved.
__lowerCAmelCase : Any = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}
# Modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : int = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]
# Static type checkers see real imports; at runtime the module is lazy.
if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on access.
    __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase_ ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """Fast scheduler-swap checks for the ONNX Stable Diffusion x4 upscale pipeline.

    Fixes: attribute/method names restored from call sites
    (``self.hub_checkpoint``, ``self.get_dummy_inputs``); ``disable`` /
    ``skip_prk_steps`` were passed undefined values; the mixin base is the
    imported ``OnnxPipelineTesterMixin``.
    """

    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline kwargs for the fast tests."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Nightly GPU integration tests for the ONNX x4 upscale pipeline.

    Fixes: property names restored from the ``self.gpu_provider`` /
    ``self.gpu_options`` call sites; the options property discarded the
    SessionOptions object and returned ``False`` — it now disables the
    memory-pattern optimization on the options and returns them.
    """

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 88 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it exports.
# The configuration and tokenizer entries are always present; the PyTorch and
# TensorFlow modeling entries are appended below only when that backend is
# installed.
#
# BUG FIX: the original rebound a single module-level name three times (the
# backend lists clobbered the dict) and then passed an undefined
# ``_import_structure`` to ``_LazyModule`` — a guaranteed NameError on import.
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm'] = [
        'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMForMultipleChoice',
        'XLMForQuestionAnswering',
        'XLMForQuestionAnsweringSimple',
        'XLMForSequenceClassification',
        'XLMForTokenClassification',
        'XLMModel',
        'XLMPreTrainedModel',
        'XLMWithLMHeadModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlm'] = [
        'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLMForMultipleChoice',
        'TFXLMForQuestionAnsweringSimple',
        'TFXLMForSequenceClassification',
        'TFXLMForTokenClassification',
        'TFXLMMainLayer',
        'TFXLMModel',
        'TFXLMPreTrainedModel',
        'TFXLMWithLMHeadModel',
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs resolve the real imports eagerly.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # BUG FIX: at runtime the module object must be replaced in ``sys.modules``
    # so attribute access triggers the lazy loader; the original assigned the
    # ``_LazyModule`` to a throwaway variable instead.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow TF integration test for the ``google/mt5-small`` checkpoint."""

    @slow
    def _lowercase ( self : Optional[int] ) -> Optional[Any]:
        """Check that the model's mean LM loss on a tiny example matches a
        reference score (computed once with the original Mesh-TF codebase).

        BUG FIX: the original bound every intermediate to one rebound name
        and then referenced ``tokenizer``, ``loss``, ``mtf_score`` and
        ``EXPECTED_SCORE``, none of which were ever defined (NameError).
        """
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        # Negated mean loss, as the reference score was recorded that way.
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 88 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for ``TextaTextGenerationPipeline`` (text2text-generation),
    covering PyTorch and TensorFlow tiny checkpoints.

    NOTE(review): obfuscation damaged this class — the first two methods
    declare duplicate ``UpperCamelCase__`` parameters (a SyntaxError), all
    test methods share the name ``_lowercase`` (each definition overrides the
    previous one), and names such as ``generator`` and ``outputs`` are
    referenced but never bound.  Restore the upstream names before use.
    """
    # Model mappings consumed by the common pipeline test mixin.
    a__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    a__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Tuple:
        """Build a pipeline instance plus sample prompts for the shared mixin."""
        __magic_name__ = TextaTextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
        return generator, ["Something to write", "Something else"]
    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> Optional[Any]:
        """Smoke-test single, batched and multi-sequence generation output shapes."""
        __magic_name__ = generator("""Something there""" )
        self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        __magic_name__ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
            ] , )
        __magic_name__ = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
            ] , )
        # Non-string input must be rejected by the pipeline.
        with self.assertRaises(UpperCamelCase__ ):
            generator(4 )
    @require_torch
    def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
        """PyTorch backend: deterministic generation, beam search and tensor output."""
        __magic_name__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        __magic_name__ = generator("""Something there""" , do_sample=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
        __magic_name__ = 3
        __magic_name__ = generator(
            """Something there""" , num_return_sequences=UpperCamelCase__ , num_beams=UpperCamelCase__ , )
        __magic_name__ = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )
        __magic_name__ = generator.model.config.eos_token_id
        __magic_name__ = """<pad>"""
        __magic_name__ = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
        self.assertEqual(
            UpperCamelCase__ , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def _lowercase ( self : int ) -> str:
        """TensorFlow backend: deterministic generation smoke test."""
        __magic_name__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        __magic_name__ = generator("""Something there""" , do_sample=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
| 88 | 1 |
def xnor_gate(input_a, input_b):
    """Logical XNOR gate.

    Returns 1 when both inputs are equal (0,0 or 1,1) and 0 otherwise.

    BUG FIX: the original declared duplicate parameter names
    (``def a__(A_, A_)`` — a SyntaxError) and compared an input with
    itself (``input_a == input_a``), which would always return 1.
    The script below also called ``xnor_gate``, which was never defined.
    """
    return 1 if input_a == input_b else 0


# Backward-compatible alias for the original (obfuscated) public name.
a__ = xnor_gate


def test_xnor_gate():
    """Exercise the full XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): obfuscation bound both batch-size constants to the same name,
# so the second assignment clobbers the first, and the training code below
# refers to an undefined ``MAX_GPU_BATCH_SIZE``.  Presumably these were
# MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32 — confirm against the
# upstream accelerate cross-validation example.
__lowerCAmelCase : List[Any] = 16
__lowerCAmelCase : Any = 32
# NOTE(review): this definition declares five parameters all named ``A_``
# (duplicate argument names are a SyntaxError in Python) and references
# ``accelerator``, ``datasets`` and ``tokenized_datasets`` which are never
# bound locally.  Upstream this was
# ``get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16)``.
def a__ ( A_, A_, A_, A_, A_ = 16 ):
    """Build train/validation/test dataloaders for one cross-validation fold.

    The GLUE MRPC train split is subdivided by the fold's train/valid index
    arrays; the official validation split serves as the held-out test set.
    """
    __magic_name__ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    __magic_name__ = DatasetDict(
        {
            """train""": dataset["""train"""].select(A_ ),
            """validation""": dataset["""train"""].select(A_ ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(A_ ):
        # max_length=None => use the model max length (it's actually the default)
        __magic_name__ = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=A_, max_length=A_ )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        __magic_name__ = datasets.map(
            A_, batched=A_, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    __magic_name__ = tokenized_datasets.rename_column("""label""", """labels""" )
    def collate_fn(A_ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        __magic_name__ = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            __magic_name__ = 16
        elif accelerator.mixed_precision != "no":
            __magic_name__ = 8
        else:
            __magic_name__ = None
        return tokenizer.pad(
            A_, padding="""longest""", max_length=A_, pad_to_multiple_of=A_, return_tensors="""pt""", )
    # Instantiate dataloaders.
    __magic_name__ = DataLoader(
        tokenized_datasets["""train"""], shuffle=A_, collate_fn=A_, batch_size=A_ )
    __magic_name__ = DataLoader(
        tokenized_datasets["""validation"""], shuffle=A_, collate_fn=A_, batch_size=A_ )
    __magic_name__ = DataLoader(
        tokenized_datasets["""test"""], shuffle=A_, collate_fn=A_, batch_size=A_ )
    return train_dataloader, eval_dataloader, test_dataloader
# NOTE(review): duplicate parameter names (``A_, A_`` — a SyntaxError) and the
# body references many names (``datasets``, ``kfold``, ``accelerator``,
# ``metric``, ``model``, ``optimizer``, ``lr_scheduler``, the dataloaders,
# ``outputs``, ``loss``, ``predictions``, ``references``, ``folds``,
# ``fold_predictions``, ``test_predictions``, ``test_references``, ``preds``,
# ``test_metric``) that are never bound because obfuscation collapsed every
# local to one rebound name.  Upstream this was
# ``training_function(config, args)``.
def a__ ( A_, A_ ):
    """Fine-tune BERT-base on GLUE MRPC with stratified k-fold cross-validation,
    averaging the per-fold test logits to produce an ensemble prediction.
    """
    __magic_name__ = []
    # Download the dataset
    __magic_name__ = load_dataset("""glue""", """mrpc""" )
    # Create our splits
    __magic_name__ = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    __magic_name__ = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    __magic_name__ = config["""lr"""]
    __magic_name__ = int(config["""num_epochs"""] )
    __magic_name__ = int(config["""seed"""] )
    __magic_name__ = int(config["""batch_size"""] )
    __magic_name__ = evaluate.load("""glue""", """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    __magic_name__ = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        __magic_name__ = batch_size // MAX_GPU_BATCH_SIZE
        __magic_name__ = MAX_GPU_BATCH_SIZE
    set_seed(A_ )
    # New Code #
    # Create our folds:
    __magic_name__ = kfold.split(np.zeros(datasets["""train"""].num_rows ), datasets["""train"""]["""label"""] )
    __magic_name__ = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(A_ ):
        __magic_name__ , __magic_name__ , __magic_name__ = get_fold_dataloaders(
            A_, A_, A_, A_, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        __magic_name__ = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=A_ )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        __magic_name__ = model.to(accelerator.device )
        # Instantiate optimizer
        __magic_name__ = AdamW(params=model.parameters(), lr=A_ )
        # Instantiate scheduler
        __magic_name__ = get_linear_schedule_with_warmup(
            optimizer=A_, num_warmup_steps=100, num_training_steps=(len(A_ ) * num_epochs) // gradient_accumulation_steps, )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
            A_, A_, A_, A_, A_ )
        # Now we train the model
        for epoch in range(A_ ):
            model.train()
            for step, batch in enumerate(A_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                __magic_name__ = model(**A_ )
                __magic_name__ = outputs.loss
                __magic_name__ = loss / gradient_accumulation_steps
                accelerator.backward(A_ )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(A_ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    __magic_name__ = model(**A_ )
                __magic_name__ = outputs.logits.argmax(dim=-1 )
                __magic_name__ , __magic_name__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=A_, references=A_, )
            __magic_name__ = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', A_ )
        # New Code #
        # We also run predictions on the test set at the very end
        __magic_name__ = []
        for step, batch in enumerate(A_ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                __magic_name__ = model(**A_ )
            __magic_name__ = outputs.logits
            __magic_name__ , __magic_name__ = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(A_, dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    __magic_name__ = torch.cat(A_, dim=0 )
    __magic_name__ = torch.stack(A_, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    __magic_name__ = metric.compute(predictions=A_, references=A_ )
    accelerator.print("""Average test metrics from all folds:""", A_ )
def a__ ( ):
    """Parse command-line arguments and launch the cross-validated training.

    NOTE(review): the body references ``parser`` (never bound — every local
    was collapsed to one rebound name), calls the undefined
    ``training_function`` (the trainer above is also named ``a__``), and
    passes the undefined ``A_`` as both arguments.  Upstream this ended with
    ``training_function(config, args)``.
    """
    __magic_name__ = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""", type=A_, default=A_, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""", type=A_, default=3, help="""The number of splits to perform across the dataset""" )
    __magic_name__ = parser.parse_args()
    __magic_name__ = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(A_, A_ )
# NOTE(review): ``main`` is undefined here; the entry point above is ``a__``.
if __name__ == "__main__":
    main()
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Functional tests for ``TensorFlowBenchmark`` on tiny checkpoints:
    inference/training timing and memory, eager vs. graph vs. XLA modes,
    CSV/log artifact generation.

    NOTE(review): obfuscation damaged this class — every method is named
    ``_lowercase`` (each definition overrides the previous one, so only the
    last would run), and the bodies reference ``results`` / ``MODEL_ID``
    which are never bound under those names.  Restore the upstream names
    before relying on these tests.
    """
    def _lowercase ( self : List[str] , UpperCamelCase__ : int ) -> str:
        """Assert every (batch_size, sequence_length) result entry is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                __magic_name__ = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(UpperCamelCase__ )
    def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
        """Inference benchmark in eager mode on a tiny GPT-2."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        """Inference benchmark with ``only_pretrain_model`` on a tiny classifier."""
        __magic_name__ = """sgugger/tiny-distilbert-classification"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _lowercase ( self : Any ) -> List[Any]:
        """Inference benchmark in default (graph) mode on a tiny GPT-2."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _lowercase ( self : Tuple ) -> List[Any]:
        """Inference benchmark with an explicitly provided config, eager mode."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _lowercase ( self : Any ) -> Union[str, Any]:
        """Inference benchmark with an explicitly provided config, graph mode."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _lowercase ( self : List[Any] ) -> Dict:
        """Training benchmark on a tiny GPT-2."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def _lowercase ( self : Optional[Any] ) -> List[str]:
        """Training benchmark with an explicitly provided config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def _lowercase ( self : Union[str, Any] ) -> Any:
        """Inference benchmark on an encoder-decoder (tiny T5) model."""
        __magic_name__ = """patrickvonplaten/t5-tiny-random"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
    def _lowercase ( self : Tuple ) -> int:
        """Inference benchmark with XLA compilation (GPU only)."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def _lowercase ( self : Union[str, Any] ) -> Dict:
        """Verify benchmark results are saved to the requested CSV files."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            __magic_name__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , """env.csv""" ) , multi_process=UpperCamelCase__ , )
            __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """env.csv""" ) ).exists() )
    def _lowercase ( self : int ) -> Optional[Any]:
        """Verify line-by-line memory tracing writes a log and a full summary."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        def _check_summary_is_not_empty(UpperCamelCase__ : Dict ):
            self.assertTrue(hasattr(UpperCamelCase__ , """sequential""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """cumulative""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """current""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """total""" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            __magic_name__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , """log.txt""" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
            __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
            __magic_name__ = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """log.txt""" ) ).exists() )
| 88 |
def a__ ( A_ ):
    """Return True if ``A_`` is an arithmetic series (constant difference
    between consecutive elements), False otherwise.

    A single-element list is trivially arithmetic.

    Raises:
        ValueError: if ``A_`` is not a list or is empty.
    """
    # BUG FIX: the original checked ``isinstance(A_, A_)`` — passing the value
    # itself as the type raises TypeError for every input.  The intended
    # check is against ``list``.
    if not isinstance(A_, list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(A_ ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    if len(A_ ) == 1:
        return True
    # The difference of the first pair must hold for every adjacent pair.
    common_diff = A_[1] - A_[0]
    for index in range(len(A_ ) - 1 ):
        if A_[index + 1] - A_[index] != common_diff:
            return False
    return True
def a__ ( A_ ):
    """Return the arithmetic mean of the numbers in ``A_``.

    Raises:
        ValueError: if ``A_`` is not a list or is empty.
    """
    # BUG FIX: the original checked ``isinstance(A_, A_)`` — passing the value
    # itself as the type raises TypeError for every input.  The intended
    # check is against ``list``.
    if not isinstance(A_, list ):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
    if len(A_ ) == 0:
        raise ValueError("""Input list must be a non empty list""" )
    # Built-in sum() replaces the manual accumulation loop.
    return sum(A_ ) / len(A_ )
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest
    doctest.testmod()
| 88 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Unit tests for ``DisjunctiveConstraint``: input validation, stepwise
    progress through one of several allowed token sequences, and reset
    behaviour.

    NOTE(review): obfuscation damaged this class — all methods share the name
    ``_lowercase`` (each overrides the previous), triple unpackings assign to
    one rebound name, and ``dc``, ``stepped``, ``completed`` and ``reset``
    are referenced but never bound under those names (NameError as written).
    """
    def _lowercase ( self : Tuple ) -> Optional[int]:
        """Valid nested-list input is accepted; tensors are rejected."""
        __magic_name__ = [[1, 2, 4], [1, 2, 3, 4]]
        __magic_name__ = DisjunctiveConstraint(UpperCamelCase__ )
        self.assertTrue(isinstance(dc.token_ids , UpperCamelCase__ ) )
        with self.assertRaises(UpperCamelCase__ ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(UpperCamelCase__ ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def _lowercase ( self : int ) -> List[Any]:
        """A branch that is a strict prefix of another must be rejected."""
        __magic_name__ = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(UpperCamelCase__ ):
            DisjunctiveConstraint(UpperCamelCase__ ) # fails here
    def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
        """Stepping 1, 2, 3 completes the [1, 2, 3] branch."""
        __magic_name__ = [[1, 2, 3], [1, 2, 4]]
        __magic_name__ = DisjunctiveConstraint(UpperCamelCase__ )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        __magic_name__ = stepped is True and completed is False and reset is False
        self.assertTrue(UpperCamelCase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        __magic_name__ = stepped is True and completed is False and reset is False
        self.assertTrue(UpperCamelCase__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(3 )
        __magic_name__ = stepped is True and completed is True and reset is False
        self.assertTrue(UpperCamelCase__ )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def _lowercase ( self : Dict ) -> List[Any]:
        """Completion via a longer branch, then reset and complete a shorter one."""
        __magic_name__ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __magic_name__ = DisjunctiveConstraint(UpperCamelCase__ )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 88 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
    """Output container (``BaseOutput`` subclass) for the encoder/decoder below.

    NOTE(review): obfuscation replaced the single annotated field with the
    bare literal ``42`` bound to ``a__``; presumably this was a
    ``sample: torch.FloatTensor`` field upstream — confirm before use.
    """
    a__ = 42
class UpperCAmelCase_ ( nn.Module ):
    """VAE-style convolutional encoder: input conv, a stack of down blocks, a
    mid block, then group-norm / SiLU / output conv.  Supports gradient
    checkpointing in the forward pass.

    NOTE(review): obfuscation collapsed every attribute assignment to one
    rebound local, so ``self.down_blocks``, ``self.mid_block``,
    ``self.conv_in``, ``self.conv_norm_out``, ``self.conv_act``,
    ``self.conv_out`` and ``self.gradient_checkpointing`` are never actually
    set (AttributeError at runtime), and ``torch.nn.Convad`` is a mangled
    ``Conv2d`` — restore the upstream names before use.
    """
    def __init__( self : List[Any] , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Tuple=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[int]=(64,) , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : List[Any]="silu" , UpperCamelCase__ : str=True , ) -> Optional[int]:
        """Build the encoder: one down block per entry of the block-type tuple,
        doubling/changing channels per ``block_out_channels``; the final conv
        emits ``2 * out_channels`` when ``double_z`` is set (mean + logvar).
        """
        super().__init__()
        __magic_name__ = layers_per_block
        __magic_name__ = torch.nn.Convad(
            UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        __magic_name__ = None
        __magic_name__ = nn.ModuleList([] )
        # down
        __magic_name__ = block_out_channels[0]
        for i, down_block_type in enumerate(UpperCamelCase__ ):
            __magic_name__ = output_channel
            __magic_name__ = block_out_channels[i]
            # The last block keeps spatial resolution (no downsample).
            __magic_name__ = i == len(UpperCamelCase__ ) - 1
            __magic_name__ = get_down_block(
                UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
            self.down_blocks.append(UpperCamelCase__ )
        # mid
        __magic_name__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # out
        __magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
        __magic_name__ = nn.SiLU()
        # double_z doubles the latent channels (mean and log-variance halves).
        __magic_name__ = 2 * out_channels if double_z else out_channels
        __magic_name__ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
        __magic_name__ = False
    def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
        """Encode an input tensor through conv-in, down blocks, mid block and
        the output head; uses ``torch.utils.checkpoint`` when training with
        gradient checkpointing enabled.

        NOTE(review): references the unbound name ``x`` on its first line.
        """
        __magic_name__ = x
        __magic_name__ = self.conv_in(UpperCamelCase__ )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(UpperCamelCase__ : int ):
                # Wrap a module so checkpoint() can re-invoke it on recompute.
                def custom_forward(*UpperCamelCase__ : str ):
                    return module(*UpperCamelCase__ )
                return custom_forward
            # down
            # use_reentrant is only accepted by torch >= 1.11.
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                for down_block in self.down_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
        else:
            # down
            for down_block in self.down_blocks:
                __magic_name__ = down_block(UpperCamelCase__ )
            # middle
            __magic_name__ = self.mid_block(UpperCamelCase__ )
        # post-process
        __magic_name__ = self.conv_norm_out(UpperCamelCase__ )
        __magic_name__ = self.conv_act(UpperCamelCase__ )
        __magic_name__ = self.conv_out(UpperCamelCase__ )
        return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
"""simple docstring"""
super().__init__()
__magic_name__ = layers_per_block
__magic_name__ = nn.Convad(
UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
__magic_name__ = None
__magic_name__ = nn.ModuleList([] )
__magic_name__ = in_channels if norm_type == """spatial""" else None
# mid
__magic_name__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
# up
__magic_name__ = list(reversed(UpperCamelCase__ ) )
__magic_name__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCamelCase__ ):
__magic_name__ = output_channel
__magic_name__ = reversed_block_out_channels[i]
__magic_name__ = i == len(UpperCamelCase__ ) - 1
__magic_name__ = get_up_block(
UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
self.up_blocks.append(UpperCamelCase__ )
__magic_name__ = output_channel
# out
if norm_type == "spatial":
__magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
else:
__magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
__magic_name__ = nn.SiLU()
__magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
__magic_name__ = False
def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
"""simple docstring"""
__magic_name__ = z
__magic_name__ = self.conv_in(UpperCamelCase__ )
__magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(UpperCamelCase__ : Optional[int] ):
def custom_forward(*UpperCamelCase__ : int ):
return module(*UpperCamelCase__ )
return custom_forward
if is_torch_version(""">=""" , """1.11.0""" ):
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
else:
# middle
__magic_name__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
else:
# middle
__magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = sample.to(UpperCamelCase__ )
# up
for up_block in self.up_blocks:
__magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
# post-process
if latent_embeds is None:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ )
else:
__magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
__magic_name__ = self.conv_act(UpperCamelCase__ )
__magic_name__ = self.conv_out(UpperCamelCase__ )
return sample
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__magic_name__ = n_e
__magic_name__ = vq_embed_dim
__magic_name__ = beta
__magic_name__ = legacy
__magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__magic_name__ = remap
if self.remap is not None:
self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
__magic_name__ = self.used.shape[0]
__magic_name__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__magic_name__ = self.re_embed
__magic_name__ = self.re_embed + 1
print(
F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
F'''Using {self.unknown_index} for unknown indices.''' )
else:
__magic_name__ = n_e
__magic_name__ = sane_index_shape
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
__magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
__magic_name__ = match.argmax(-1 )
__magic_name__ = match.sum(2 ) < 1
if self.unknown_index == "random":
__magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__magic_name__ = self.unknown_index
return new.reshape(UpperCamelCase__ )
def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
"""simple docstring"""
__magic_name__ = inds.shape
assert len(UpperCamelCase__ ) > 1
__magic_name__ = inds.reshape(ishape[0] , -1 )
__magic_name__ = self.used.to(UpperCamelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
__magic_name__ = 0 # simply set to zero
__magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
return back.reshape(UpperCamelCase__ )
def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
__magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
__magic_name__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
__magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
__magic_name__ = None
__magic_name__ = None
# compute loss for embedding
if not self.legacy:
__magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__magic_name__ = z + (z_q - z).detach()
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
__magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
__magic_name__ = self.remap_to_used(UpperCamelCase__ )
__magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
__magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
"""simple docstring"""
if self.remap is not None:
__magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
__magic_name__ = self.unmap_to_all(UpperCamelCase__ )
__magic_name__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__magic_name__ = self.embedding(UpperCamelCase__ )
if shape is not None:
__magic_name__ = z_q.view(UpperCamelCase__ )
# reshape back to match original input shape
__magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = parameters
__magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
__magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
__magic_name__ = deterministic
__magic_name__ = torch.exp(0.5 * self.logvar )
__magic_name__ = torch.exp(self.logvar )
if self.deterministic:
__magic_name__ = __magic_name__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
"""simple docstring"""
__magic_name__ = randn_tensor(
self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
__magic_name__ = self.mean + self.std * sample
return x
def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
__magic_name__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return self.mean
| 88 | 1 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__lowerCAmelCase : Dict = logging.getLogger(__name__)
__lowerCAmelCase : List[str] = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__lowerCAmelCase : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
a__ = field(
default=_A , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
a__ = field(
default=_A , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_A )} , )
a__ = field(
default=_A , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a__ = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a__ = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a__ = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a__ = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def _lowercase ( self : List[Any] ) -> int:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
a__ = field(
default=_A , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
a__ = field(
default=_A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a__ = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
a__ = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a__ = field(
default=_A , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
a__ = field(
default=_A , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
a__ = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a__ = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
a__ = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
a__ = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a__ = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
a__ = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def _lowercase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
if self.train_file is not None:
__magic_name__ = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__magic_name__ = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def a__ ( A_, A_ ):
'''simple docstring'''
with open(A_, """r""", encoding="""utf-8""" ) as f:
__magic_name__ = [json.loads(A_ ) for line in f.read().splitlines() if (len(A_ ) > 0 and not line.isspace())]
assert len(A_ ) == len(A_ )
__magic_name__ = {c: dataset[c] for c in dataset.column_names}
__magic_name__ = refs
return Dataset.from_dict(A_ )
def a__ ( ):
'''simple docstring'''
__magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__magic_name__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""", A_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(data_args.dataset_name, data_args.dataset_config_name )
if "validation" not in datasets.keys():
__magic_name__ = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[:{data_args.validation_split_percentage}%]''', )
__magic_name__ = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=f'''train[{data_args.validation_split_percentage}%:]''', )
else:
__magic_name__ = {}
if data_args.train_file is not None:
__magic_name__ = data_args.train_file
if data_args.validation_file is not None:
__magic_name__ = data_args.validation_file
__magic_name__ = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
__magic_name__ = """text"""
__magic_name__ = load_dataset(A_, data_files=A_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__magic_name__ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__magic_name__ = AutoConfig.from_pretrained(model_args.config_name, **A_ )
elif model_args.model_name_or_path:
__magic_name__ = AutoConfig.from_pretrained(model_args.model_name_or_path, **A_ )
else:
__magic_name__ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
__magic_name__ = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **A_ )
elif model_args.model_name_or_path:
__magic_name__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **A_ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
__magic_name__ = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path ), config=A_, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info("""Training new model from scratch""" )
__magic_name__ = AutoModelForMaskedLM.from_config(A_ )
model.resize_token_embeddings(len(A_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__magic_name__ = datasets["""train"""].column_names
else:
__magic_name__ = datasets["""validation"""].column_names
__magic_name__ = """text""" if """text""" in column_names else column_names[0]
__magic_name__ = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(A_ ):
# Remove empty lines
__magic_name__ = [line for line in examples["""text"""] if len(A_ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""], padding=A_, truncation=A_, max_length=data_args.max_seq_length )
__magic_name__ = datasets.map(
A_, batched=A_, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__magic_name__ = add_chinese_references(tokenized_datasets["""train"""], data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__magic_name__ = add_chinese_references(
tokenized_datasets["""validation"""], data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__magic_name__ = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__magic_name__ = False
# Data collator
# This one will take care of randomly masking the tokens.
__magic_name__ = DataCollatorForWholeWordMask(tokenizer=A_, mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__magic_name__ = Trainer(
model=A_, args=A_, train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None, eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None, tokenizer=A_, data_collator=A_, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__magic_name__ = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__magic_name__ = model_args.model_name_or_path
else:
__magic_name__ = None
__magic_name__ = trainer.train(resume_from_checkpoint=A_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__magic_name__ = os.path.join(training_args.output_dir, """train_results.txt""" )
if trainer.is_world_process_zero():
with open(A_, """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, """trainer_state.json""" ) )
# Evaluation
__magic_name__ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__magic_name__ = trainer.evaluate()
__magic_name__ = math.exp(eval_output["""eval_loss"""] )
__magic_name__ = perplexity
__magic_name__ = os.path.join(training_args.output_dir, """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(A_, """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def a__ ( A_ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 88 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
    """Helper that builds tiny MaskFormerSwin configs and matching dummy inputs.

    The original (name-obfuscated) version assigned every constructor argument
    to one throwaway local, so none of the attributes read by the methods below
    were ever stored, and all methods shared the name ``_lowercase`` even
    though callers in this file use ``get_config``, ``prepare_config_and_inputs``
    etc.  Attributes are now stored on ``self`` and methods carry the names
    their call sites use.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        # NOTE: the list defaults are shared between instances; they are only
        # read, never mutated, so the mutable-default pitfall does not bite here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` for one test case."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ``MaskFormerSwinConfig`` from the stored hyperparameters."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # NOTE(review): "path_norm" reproduces the original keyword exactly;
            # confirm it is the kwarg MaskFormerSwinConfig expects (vs "patch_norm").
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward a ``MaskFormerSwinModel`` and check the last-hidden-state shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # Each stage after the first quarters the token count and doubles the
        # embedding width (standard Swin downsampling).
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward a ``MaskFormerSwinBackbone`` and validate feature maps/channels."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # an out_features entry that names no real stage must raise
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape ModelTesterMixin expects."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Test suite for the MaskFormerSwin model and backbone.

    Fixes over the obfuscated original: the base classes were the undefined
    placeholder ``_A`` (the intended mixins are imported at the top of this
    file), and every class attribute was assigned to the same name ``a__``,
    so ``self.all_model_classes`` (read by the generic mixin tests below)
    never existed.
    """

    # Models exercised by the generic ModelTesterMixin checks (see the loop
    # over `self.all_model_classes` in the tests below).
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    # NOTE(review): the original assigned five anonymous False flags; these
    # names follow the ModelTesterMixin conventions -- confirm against the mixin.
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
__magic_name__ = MaskFormerSwinModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _lowercase ( self : List[str] ) -> Optional[int]:
        """Intentionally skipped: the model's extra spatial-dimensions output is
        incompatible with `nn.DataParallel` replication (see the skip reason)."""
        pass
    def _lowercase ( self : str ) -> Dict:
        """Run the standard ConfigTester battery for MaskFormerSwinConfig.

        Exercises JSON round-tripping, save/load, num_labels handling, and
        argument initialization via the `self.config_tester` helper.
        """
        # NOTE(review): `create_and_test_config_common_properties` is not defined
        # under that name in the visible code (all methods here are named
        # `_lowercase`) -- confirm it exists on this class at runtime.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _lowercase ( self : Any ) -> int:
        """Intentionally skipped: Swin consumes pixel_values, not inputs_embeds."""
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _lowercase ( self : str ) -> List[Any]:
        """Intentionally skipped: feed-forward chunking is unsupported for Swin."""
        pass
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self : Tuple ) -> Dict:
    """Check every model's forward() signature begins with `pixel_values`.

    NOTE(review): `signature` / `arg_names` are unbound under those names —
    the obfuscated `__magic_name__` assignments were presumably meant to bind
    them; confirm.
    """
    __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        __magic_name__ = model_class(UpperCamelCase__ )
        __magic_name__ = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        __magic_name__ = [*signature.parameters.keys()]
        __magic_name__ = ["""pixel_values"""]
        self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def _lowercase ( self : Tuple ) -> int:
    """Skipped: attention outputs are not supported by this backbone."""
    pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : List[str] ) -> Dict:
    """Skipped: this model is internal-only and is not tested here."""
    pass
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
    """Shared helper: run one model and validate hidden-state count and patch-grid shape.

    NOTE(review): the four parameters share one name (a SyntaxError) and the
    body references `model` / `outputs` / `hidden_states` / `config` /
    `image_size` / `patch_size` / `num_patches`, none of which are bound —
    obfuscation replaced the original names; confirm/restore.
    """
    __magic_name__ = model_class(UpperCamelCase__ )
    model.to(UpperCamelCase__ )
    model.eval()
    with torch.no_grad():
        __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
    __magic_name__ = outputs.hidden_states
    __magic_name__ = getattr(
        self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
    self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
    # Swin has a different seq_length
    __magic_name__ = (
        config.patch_size
        if isinstance(config.patch_size , collections.abc.Iterable )
        else (config.patch_size, config.patch_size)
    )
    __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    self.assertListEqual(
        list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowercase ( self : Dict ) -> Dict:
    """Exercise hidden-state outputs both via explicit kwarg and via the config flag."""
    __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
    __magic_name__ = (
        self.model_tester.image_size
        if isinstance(self.model_tester.image_size , collections.abc.Iterable )
        else (self.model_tester.image_size, self.model_tester.image_size)
    )
    for model_class in self.all_model_classes:
        __magic_name__ = True
        self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        __magic_name__ = True
        self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
    """Hidden-state checks with the image size padded up past a multiple of the patch size."""
    __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
    __magic_name__ = 3
    __magic_name__ = (
        self.model_tester.image_size
        if isinstance(self.model_tester.image_size , collections.abc.Iterable )
        else (self.model_tester.image_size, self.model_tester.image_size)
    )
    __magic_name__ = (
        config.patch_size
        if isinstance(config.patch_size , collections.abc.Iterable )
        else (config.patch_size, config.patch_size)
    )
    # Pad each spatial dimension so it is divisible by the patch size.
    __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
    __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
    for model_class in self.all_model_classes:
        __magic_name__ = True
        self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        __magic_name__ = True
        self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def _lowercase ( self : Optional[int] ) -> int:
    """Skipped: no public pretrained checkpoints to load."""
    pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : List[str] ) -> List[Any]:
    """Skipped pending migration to native Swin."""
    pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def _lowercase ( self : Dict ) -> Optional[Any]:
    """Skipped pending migration to native Swin."""
    pass
def _lowercase ( self : Dict ) -> Any:
    """Verify tuple-style and dict-style forward outputs are numerically identical.

    NOTE(review): many locals (`t`, `model`, `tuple_output`, `dict_output`,
    `tuple_object`, `dict_object`, ...) are unbound under the names the body
    uses — obfuscation replaced the assignment targets; confirm/restore.
    """
    __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()

    def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ):
        # Zero out NaNs so the allclose comparison below is meaningful.
        __magic_name__ = 0
        return t

    def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
        with torch.no_grad():
            __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
            __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()

        def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
            # Walk nested containers in lockstep and compare leaf tensors.
            if isinstance(UpperCamelCase__ , (List, Tuple) ):
                for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
                    recursive_check(UpperCamelCase__ , UpperCamelCase__ )
            elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                for tuple_iterable_value, dict_iterable_value in zip(
                    tuple_object.values() , dict_object.values() ):
                    recursive_check(UpperCamelCase__ , UpperCamelCase__ )
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
                        """Tuple and dict output are not equal. Difference:"""
                        F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                        F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
                        F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
                    ) , )

        recursive_check(UpperCamelCase__ , UpperCamelCase__ )

    # Compare with and without labels, and with hidden states enabled.
    for model_class in self.all_model_classes:
        __magic_name__ = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
        check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
        check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
        check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
    """Backbone-mixin tests for MaskFormerSwinBackbone.

    NOTE(review): both class attributes are bound to the same obfuscated name
    `a__`; originally they were distinct (e.g. all_model_classes / config_class).
    """

    a__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    a__ = MaskFormerSwinConfig

    def _lowercase ( self : Union[str, Any] ) -> List[Any]:
        """Create the shared model-tester fixture."""
        __magic_name__ = MaskFormerSwinModelTester(self )

    def _lowercase ( self : List[str] ) -> Optional[Any]:
        """End-to-end backbone smoke test: feature maps, hidden states, attentions."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            __magic_name__ = backbone_class(UpperCamelCase__ )
            backbone.to(UpperCamelCase__ )
            backbone.eval()
            __magic_name__ = backbone(**UpperCamelCase__ )

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                # NOTE(review): assertTrue(a, b) treats b as the failure message,
                # so this is always truthy — probably meant assertEqual; confirm.
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )

            # Test output_hidden_states=True
            __magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )

            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    __magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )

            # Test output_attentions=True
            if self.has_attentions:
                __magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
                self.assertIsNotNone(outputs.attentions )
| 88 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class UpperCAmelCase_ ( _A , unittest.TestCase ):
    """Tokenization tests for RoFormer (slow and fast tokenizers); requires rjieba.

    NOTE(review): obfuscation collapsed distinct class attributes into `a__`
    (each rebinding shadows the previous) and several method bodies reference
    locals (`input_text`, `tokens`, `tokenizer`, ...) that are no longer bound.
    """

    a__ = RoFormerTokenizer
    a__ = RoFormerTokenizerFast
    a__ = True
    a__ = True

    def _lowercase ( self : List[str] ) -> Union[str, Any]:
        """Standard mixin setUp; no extra fixtures needed."""
        super().setUp()

    def _lowercase ( self : List[str] , **UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
        """Build the slow tokenizer from the pretrained Chinese RoFormer checkpoint."""
        return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **UpperCamelCase__ )

    def _lowercase ( self : List[str] , **UpperCamelCase__ : int ) -> List[str]:
        """Build the fast (Rust) tokenizer from the pretrained Chinese RoFormer checkpoint."""
        return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **UpperCamelCase__ )

    def _lowercase ( self : List[str] ) -> List[Any]:
        """Return a (raw text, expected tokenization) Chinese sample pair."""
        __magic_name__ = """永和服装饰品有限公司,今天天气非常好"""
        __magic_name__ = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
        return input_text, output_text

    def _lowercase ( self : Any ) -> Optional[Any]:
        """Slow tokenizer: tokenize the Chinese sample and round-trip to ids."""
        __magic_name__ = self.get_tokenizer()
        __magic_name__ , __magic_name__ = self.get_chinese_input_output_texts()
        __magic_name__ = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , output_text.split() )
        __magic_name__ = tokens + [tokenizer.unk_token]
        __magic_name__ = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )

    def _lowercase ( self : int ) -> Dict:
        """Fast tokenizer: same tokenization and id round-trip as the slow path."""
        __magic_name__ = self.get_rust_tokenizer()
        __magic_name__ , __magic_name__ = self.get_chinese_input_output_texts()
        __magic_name__ = tokenizer.tokenize(UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , output_text.split() )
        __magic_name__ = tokens + [tokenizer.unk_token]
        __magic_name__ = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )

    def _lowercase ( self : int ) -> Tuple:
        """Intentionally skipped mixin hook."""
        pass

    def _lowercase ( self : List[Any] ) -> str:
        """Intentionally skipped mixin hook."""
        pass

    def _lowercase ( self : Optional[Any] ) -> Tuple:
        """Intentionally skipped mixin hook."""
        pass
| 88 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCAmelCase_ :
    """A binary-tree node: an integer payload plus left/right child links.

    Fixes the obfuscated original, whose __init__ assigned the unbound name
    `value` to a throwaway local instead of setting instance attributes.
    """

    def __init__( self , value : int ) -> None:
        """Create a leaf node holding *value*; both children start as None."""
        self.value = value
        self.left = None   # left subtree root (node or None)
        self.right = None  # right subtree root (node or None)
class UpperCAmelCase_ :
    """Iterable wrapper over a binary tree that yields the sum of all node values.

    Fixes the obfuscated original: __init__ referenced the unbound name `tree`,
    the traversal method referenced the unbound name `node`, and __iter__ called
    `depth_first_search` while the method had been renamed `_lowercase`
    (an AttributeError). The original names are restored from those call sites.
    """

    def __init__( self , tree ) -> None:
        """Remember the root node whose subtree will be summed."""
        self.tree = tree

    def depth_first_search( self , node ) -> int:
        """Recursively sum ``node.value`` over the subtree rooted at *node* (0 for None)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self ):
        """Yield a single value: the total of every node in the tree."""
        yield self.depth_first_search(self.tree )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

__lowerCAmelCase : List[Any] = logging.get_logger(__name__)

# Canonical DeBERTa-v2 checkpoints mapped to their hosted config files
# (pretrained config archive map).
__lowerCAmelCase : Dict = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class UpperCAmelCase_ ( _A ):
    """Configuration class for DeBERTa-v2 models.

    Defaults reproduce the ``microsoft/deberta-v2-xlarge`` architecture
    (1536 hidden / 24 layers / 24 heads, 128100-piece vocabulary).

    NOTE(review): the obfuscated original reused one parameter name for every
    argument (a SyntaxError); the names below are restored from the attribute
    assignments in the body, which kept the real names.
    """

    a__ = """deberta-v2"""  # model_type identifier (attribute name mangled by obfuscation)

    def __init__(
        self ,
        vocab_size=12_8100 ,                 # SentencePiece vocabulary size
        hidden_size=1536 ,
        num_hidden_layers=24 ,
        num_attention_heads=24 ,
        intermediate_size=6144 ,
        hidden_act="""gelu""" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-7 ,
        relative_attention=False ,
        max_relative_positions=-1 ,          # -1 means "use max_position_embeddings"
        pad_token_id=0 ,
        position_biased_input=True ,
        pos_att_type=None ,                  # e.g. "p2c|c2p" or a list of such tags
        pooler_dropout=0 ,
        pooler_hidden_act="""gelu""" ,
        **kwargs , ):
        """Store the configuration; unknown kwargs are forwarded to the base config."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept "p2c|c2p"-style strings as well as lists.
        if isinstance(pos_att_type , str ):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        self.pos_att_type = pos_att_type

        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size falls back to hidden_size when not supplied.
        self.pooler_hidden_size = kwargs.get("""pooler_hidden_size""" , hidden_size )
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class UpperCAmelCase_ ( _A ):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): the obfuscated original named all three members `_lowercase`
    (later defs shadowing earlier ones) and reused duplicate parameter names
    (a SyntaxError). Names are restored to the standard transformers OnnxConfig
    API; `generate_dummy_inputs` must keep that name for the super() call to be
    a real override.
    """

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the exported graph's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def default_onnx_opset( self ) -> int:
        """Minimum ONNX opset known to support the exported operations."""
        return 12

    def generate_dummy_inputs(
        self ,
        preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,
        batch_size : int = -1 ,
        seq_length : int = -1 ,
        num_choices : int = -1 ,
        is_pair : bool = False ,
        framework : Optional["TensorType"] = None ,
        num_channels : int = 3 ,
        image_width : int = 40 ,
        image_height : int = 40 ,
        tokenizer : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        """Build dummy inputs, dropping token_type_ids when the model defines none."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor , framework=framework )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 88 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table for the Funnel Transformer package.
# NOTE(review): obfuscation renamed every `_import_structure` binding to
# `__lowerCAmelCase`; the conditional additions below and the _LazyModule call
# at the bottom (which reads `_import_structure`) no longer refer to this
# dict — confirm/restore before relying on lazy imports.
__lowerCAmelCase : str = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}

# Fast tokenizer only when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Any = ['FunnelTokenizerFast']

# PyTorch model symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[int] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

# TensorFlow model symbols.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Tuple = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

# Static imports for type checkers only; at runtime the names are resolved
# lazily via _LazyModule.
if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream assigns this to sys.modules[__name__]; binding it
    # to a plain variable never installs the lazy module — confirm/restore.
    __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 1 |
import random
def a__ ( data, pivot ):
    """Three-way partition of *data* around *pivot*.

    Returns a tuple ``(less, equal, greater)`` of new lists holding the
    elements of *data* that compare <, ==, > *pivot* respectively; the input
    is not modified.

    Fixes the obfuscated original, whose duplicated parameter names were a
    SyntaxError and which appended the wrong value to each bucket.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def a__ ( items, index ):
    """Return the *index*-th smallest element of *items* (0-based) or None if
    *index* is out of range — randomized quickselect, average O(n).

    Fixes the obfuscated original: duplicated parameter names (SyntaxError) and
    calls to the no-longer-existing names ``_partition`` / ``quick_select``.
    The partition step is inlined and recursion goes through a closure so the
    routine keeps working even though module-level ``a__`` is later shadowed.
    """

    def _three_way(data, pivot):
        # Split data into (< pivot, == pivot, > pivot) buckets.
        less, equal, greater = [], [], []
        for element in data:
            if element < pivot:
                less.append(element )
            elif element > pivot:
                greater.append(element )
            else:
                equal.append(element )
        return less, equal, greater

    def _select(data, rank):
        # Out-of-range ranks have no answer.
        if rank >= len(data ) or rank < 0:
            return None
        pivot = data[random.randint(0, len(data ) - 1 )]
        smaller, equal, larger = _three_way(data, pivot )
        m = len(smaller )
        count = len(equal )
        if m <= rank < m + count:
            # rank falls inside the pivot's equal-run
            return pivot
        elif m > rank:
            # must be in the smaller partition
            return _select(smaller, rank )
        else:
            # must be in the larger partition
            return _select(larger, rank - (m + count) )

    return _select(items, index )
| 88 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Smoke tests for TensorFlowBenchmark over tiny checkpoints.

    NOTE(review): obfuscation replaced many local names with `__magic_name__`;
    bodies reference `MODEL_ID` / `benchmark` / `results` / `config` etc.,
    which are no longer bound under those names — confirm/restore.
    """

    def _lowercase ( self : List[str] , UpperCamelCase__ : int ) -> str:
        """Assert every (batch_size, sequence_length) cell of a result dict is populated."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                __magic_name__ = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
        """Inference benchmark (eager mode) for tiny GPT-2, no explicit config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Optional[Any] ) -> List[Any]:
        """Inference benchmark with only_pretrain_model for a tiny classifier."""
        __magic_name__ = """sgugger/tiny-distilbert-classification"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Any ) -> List[Any]:
        """Inference benchmark (graph mode) for tiny GPT-2."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Tuple ) -> List[Any]:
        """Inference benchmark (eager mode) passing an explicit model config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Any ) -> Union[str, Any]:
        """Inference benchmark (graph mode) passing an explicit model config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : List[Any] ) -> Dict:
        """Training benchmark for tiny GPT-2, no explicit config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self : Optional[Any] ) -> List[str]:
        """Training benchmark for tiny GPT-2 with an explicit model config."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def _lowercase ( self : Union[str, Any] ) -> Any:
        """Inference benchmark for a tiny seq2seq (T5) model with config."""
        __magic_name__ = """patrickvonplaten/t5-tiny-random"""
        __magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
    def _lowercase ( self : Tuple ) -> int:
        """XLA inference benchmark (GPU only)."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        __magic_name__ = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
        __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
        __magic_name__ = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def _lowercase ( self : Union[str, Any] ) -> Dict:
        """CSV export: all three output files are written to the temp dir."""
        __magic_name__ = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            __magic_name__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , """env.csv""" ) , multi_process=UpperCamelCase__ , )
            __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """env.csv""" ) ).exists() )

    def _lowercase ( self : int ) -> Optional[Any]:
        """Line-by-line memory tracing: summary fields populated and log file written."""
        __magic_name__ = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(UpperCamelCase__ : Dict ):
            # The memory summary must expose all four aggregate views.
            self.assertTrue(hasattr(UpperCamelCase__ , """sequential""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """cumulative""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """current""" ) )
            self.assertTrue(hasattr(UpperCamelCase__ , """total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            __magic_name__ = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , """log.txt""" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
            __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
            __magic_name__ = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(UpperCamelCase__ , """log.txt""" ) ).exists() )
| 88 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments

__lowerCAmelCase : List[str] = logging.getLogger(__name__)

@dataclass
class UpperCAmelCase_ ( _A ):
    """Seq2Seq-specific extension of TrainingArguments.

    NOTE(review): every field is bound to the same obfuscated name `a__`
    (each rebinding shadows the previous) and most defaults were replaced by
    `_A`; original field names/defaults need restoring before use.
    """

    a__ = field(
        default=0.0 , metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    a__ = field(default=_A , metadata={"""help""": """Whether to SortishSamler or not."""} )
    a__ = field(
        default=_A , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    a__ = field(default=_A , metadata={"""help""": """whether to use adafactor"""} )
    a__ = field(
        default=_A , metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    a__ = field(
        default=_A , metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    a__ = field(default=_A , metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    a__ = field(
        default=_A , metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    a__ = field(
        default="""linear""" , metadata={"""help""": f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 88 |
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative letter frequencies (percent) of English text; values sum to ~100.
__lowerCAmelCase : Optional[int] = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
# Letters ordered from most to least frequent, and the plain alphabet.
__lowerCAmelCase : Optional[Any] = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__lowerCAmelCase : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def a__ ( A_ ):
    """Count occurrences of each uppercase letter A-Z in *A_* (case-insensitive).

    Returns a dict with all 26 letters as keys; letters absent from the text
    map to 0. Non-alphabetic characters are ignored.

    Fixes the obfuscated original, which referenced the unbound names
    ``message`` and ``letter_count``. Uses ``string.ascii_uppercase`` directly
    (the module constant LETTERS holds the identical literal).
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in A_.upper():
        if letter in string.ascii_uppercase:
            letter_count[letter] += 1
    return letter_count
def a__ ( A_ ):
    """Return the first element of *A_* — classically used as a sort key
    (the original body referenced the unbound name ``x``)."""
    return A_[0]
def a__ ( A_ ):
    """Return the alphabet ordered from most- to least-frequent letter of *A_*.

    Ties inside a frequency bucket are broken by reverse ETAOIN rank — the
    classic ordering used for English frequency cryptanalysis.

    Fixes the obfuscated original: the counting helper it called lost its
    original name (``get_letter_count``), several locals were unbound, and the
    sort flags were mangled — counting is now inlined and the flags restored.
    """
    # Count A-Z occurrences inline.
    letter_to_freq = {letter: 0 for letter in LETTERS}
    for letter in A_.upper():
        if letter in LETTERS:
            letter_to_freq[letter] += 1

    # Bucket letters by their frequency.
    freq_to_letter = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )

    # Within each bucket, order letters by reverse ETAOIN rank, then flatten.
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = """""".join(freq_to_letter[freq] )

    # Highest frequency first.
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=lambda pair: pair[0] , reverse=True )
    return """""".join(pair[1] for pair in freq_pairs )
def a__ ( A_ ):
    """Score (0-12) how closely the letter frequencies of *A_* match English.

    One point for each of ETAOIN's six most common letters found among the
    text's six most frequent, plus one for each of the six least common found
    among the text's six least frequent.

    Fixes the obfuscated original, which called the no-longer-existing name
    ``get_frequency_order``; the frequency-order computation is inlined as a
    local helper so the score is self-contained.
    """

    def _frequency_order(message):
        # Local copy of the frequency-order routine (module-level version lost
        # its original name to obfuscation and cannot be called reliably).
        counts = {letter: 0 for letter in LETTERS}
        for ch in message.upper():
            if ch in LETTERS:
                counts[ch] += 1
        buckets = {}
        for letter in LETTERS:
            buckets.setdefault(counts[letter] , [] ).append(letter )
        for freq in buckets:
            buckets[freq].sort(key=ETAOIN.find , reverse=True )
        return """""".join("""""".join(buckets[f] ) for f in sorted(buckets , reverse=True ) )

    freq_order = _frequency_order(A_ )
    match_score = 0
    # Most common English letters present among the text's most common six.
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    # Least common English letters present among the text's least common six.
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 1 |
def a__ ( A_ ):
    """Generate all permutations of list *A_* using Heap's iterative algorithm.

    Returns a list of tuples (n! of them); the input list is permuted in place
    while generating, matching the classic implementation.

    Fixes the obfuscated original: the inner helper had duplicated parameter
    names (a SyntaxError) and referenced the unbound names ``n`` / ``arr`` /
    ``c`` / ``res``; the swap targets are restored per Heap's algorithm.
    """
    if len(A_ ) <= 1:
        return [tuple(A_ )]

    res = []

    def generate(n, arr):
        """Emit every permutation of arr[:n] into res (Heap's algorithm)."""
        c = [0] * n  # c[i] acts as the loop counter of "level" i
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                # Even level: swap with slot 0; odd level: swap with slot c[i].
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(A_ ), A_ )
    return res
if __name__ == "__main__":
    # NOTE(review): `user_input` / `arr` are never bound under those names —
    # the obfuscated `__lowerCAmelCase` assignments were presumably meant to
    # bind them, and `heaps` is the pre-obfuscation name of `a__`; confirm.
    __lowerCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
    __lowerCAmelCase : int = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
| 88 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs mirrored on the HF Google Cloud Storage bucket;
# each entry drives one parameterized test case below.
__lowerCAmelCase : Any = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def a__ ( A_=True ):
    """Build kwargs dicts for ``parameterized.named_parameters``.

    When ``A_`` is truthy, one test case per (dataset, config) pair;
    otherwise one per distinct dataset name.

    Fix: the body tested the undefined name ``with_config`` while the
    parameter had been renamed to ``A_`` (NameError at call time).
    NOTE(review): ``DATASETS_ON_HF_GCP`` is not defined under that name in
    this file (the list above is bound to ``__lowerCAmelCase``) — confirm.
    """
    if A_:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCAmelCase_ ( TestCase ):
    """Checks that the serialized DatasetInfo of each GCS-mirrored dataset
    can be downloaded.

    Fixes: the test method declared two parameters with the same mangled
    name (SyntaxError); the decorator passed the undefined ``_A``; the
    base class ``_A`` is the imported ``TestCase``.  Parameter names must
    be ``dataset``/``config_name`` to match the named_parameters kwargs.
    """

    a__ = None
    a__ = None

    def _lowercase ( self , dataset , config_name ):
        """Download the dataset_info file for one (dataset, config) pair."""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            # Public GCS URL of the serialized DatasetInfo file.
            path = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , """/""" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(path , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def a__ ( tmp_path_factory ):
    """Integration test: build wikipedia 20220301.frr from the HF GCS mirror.

    Fix: the parameter had been mangled to ``A_`` while the body read the
    pytest fixture name ``tmp_path_factory`` (NameError, and pytest would
    not inject the fixture).
    """
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name="""20220301.frr""", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def a__ ( tmp_path ):
    """Integration test: stream wikipedia 20220301.frr from the HF GCS mirror.

    Fix: parameter mangled to ``A_`` (pytest fixture name lost) and the
    isinstance checks compared against the undefined ``A_`` instead of the
    imported IterableDatasetDict / IterableDataset.
    """
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path, dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name="""20220301.frr""", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""], IterableDataset )
    assert next(iter(ds["""train"""] ) )
| 88 | 1 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> 
exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions identical to their
    reference after optional normalisation.

    Fix: the compute method declared six parameters all named
    ``UpperCamelCase__`` (SyntaxError) and assigned every intermediate to a
    throwaway name.
    """

    def _lowercase ( self ):
        """Declare the metric's inputs: one prediction and one reference string."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , reference_urls=[] , )

    def _lowercase ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """Compute the exact-match rate (0.0-100.0) after normalisation."""
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , """""" , x ) for x in predictions] )
                references = np.array([re.sub(s , """""" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("""""" , """""" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("""""" , """""" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 88 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """CPU-only regression test: an Accelerate-prepared optimizer must stay
    picklable.

    Fix: the method read the undefined ``UpperCamelCase__`` instead of the
    prepared optimizer (NameError at runtime).
    """

    def _lowercase ( self ):
        """Prepare an SGD optimizer with Accelerate and round-trip it through pickle."""
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset global accelerate state so later tests start clean.
        AcceleratorState._reset_state()
| 88 | 1 |
def a__ ( A_ ):
    """Return the first ``A_`` terms of the harmonic series as strings.

    ``A_`` may be an int or a numeric string; an empty string yields [].
    Example: a__(3) -> ['1', '1/2', '1/3'].

    Fix: the body compared the undefined ``n_term`` and appended to the
    undefined ``series`` (the parameter had been renamed to ``A_``).
    """
    if A_ == "":
        return []
    series = []
    for temp in range(int(A_ ) ):
        # First term is "1", every later term is "1/k".
        series.append(f'''1/{temp + 1}''' if series else """1""" )
    return series
if __name__ == "__main__":
    # Interactive demo: read n and print the first n harmonic-series terms.
    # Fix: the mangled version read the undefined ``nth_term`` and called
    # ``harmonic_series`` which does not exist here (the builder above is
    # bound to ``a__``).
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(a__(nth_term))
| 88 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class UpperCAmelCase_ ( _A ):
    """Iterable dataset that tokenizes HumanEval prompts.

    Each prompt is yielded ``n_copies`` times so batch generation can
    produce several candidates per task.

    Fix: ``__init__`` declared four parameters with the same mangled name
    (SyntaxError) and stored nothing on ``self``.
    """

    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        # Default to evaluating every task in the dataset.
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="""pt""" )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class UpperCAmelCase_ ( _A ):
    """Custom stopping criteria: halt generation once every sequence contains
    one of the EOF strings after the prompt.

    Fix: ``__init__`` declared three parameters with the same mangled name
    (SyntaxError) and stored nothing on ``self``.
    """

    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__( self , input_ids , scores , **kwargs ):
        """Return True once all generations contain an EOF marker."""
        # Decode only the tokens produced after the prompt.
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def a__ ( A_ ):
    """Truncate generated code ``A_`` at the first EOF marker.

    Splits on the EOF marker patterns (keeping the separators) and drops
    the final separator plus the trailing fragment.
    Fix: the mangled version joined the *input* into the split pattern and
    read the undefined ``string_list``.
    NOTE(review): ``EOF_STRINGS`` is bound to ``__lowerCAmelCase`` in this
    file's mangled form — confirm the name before running.
    """
    string_list = re.split("""(%s)""" % """|""".join(EOF_STRINGS ), A_ )
    # last string should be ""
    return """""".join(string_list[:-2] )
def a__ ( accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs ):
    """Generate ``batch_size`` code completions per HumanEval task.

    Returns one list of decoded, EOF-truncated code strings per task,
    gathered across all accelerator processes.

    Fix: the def header declared six parameters with the same mangled name
    (SyntaxError) and the accumulators were never bound.
    """
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            # Tell the stopping criteria where the prompt ends.
            gen_kwargs["""stopping_criteria"""][0].start_length = batch["""ids"""].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch["""ids"""][:, : batch["""input_len"""]], num_return_sequences=batch_size, **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch["""task_id"""].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            # NOTE(review): ``remove_last_block`` is bound to ``a__`` in this
            # file's mangled form — confirm the name before running.
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def a__ ( ):
    """Entry point: generate completions for HumanEval and score pass@k.

    Fix: the mangled version bound every value to a throwaway name and then
    read the originals (``args``, ``tokenizer``, ``model``, ...), and never
    wrote the two environment variables.
    NOTE(review): ``TokenizedDataset``/``EndOfFunctionCriteria``/
    ``complete_code`` are bound to mangled names in this file — confirm.
    """
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["""HF_ALLOW_CODE_EVAL"""] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["""TOKENIZERS_PARALLELISM"""] = """false"""
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("""openai_humaneval""" )
    code_eval_metric = load_metric("""code_eval""" )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["""test"""], n_copies=n_copies, n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]] )
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""" )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader )
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval["""test"""][task]["""test"""]
            entry_point = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append("""\n""" + test_func + """\n""" + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file, """w""" ) as fp:
            json.dump(pass_at_k, fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    # NOTE(review): no function named ``main`` is defined in this file (the
    # entry point above is bound to ``a__``) — confirm before running.
    main()
| 88 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger.
__lowerCAmelCase : Any = logging.get_logger(__name__)
# SentencePiece word-boundary marker.
__lowerCAmelCase : Dict = '▁'
# Expected vocabulary filename inside a checkpoint directory.
__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'}
# Remote vocabulary file for the reference checkpoint.
__lowerCAmelCase : int = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
# Maximum input length for the reference checkpoint.
__lowerCAmelCase : str = {
    'google/reformer-crime-and-punishment': 524288,
}
class UpperCAmelCase_ ( _A ):
    """SentencePiece-based tokenizer (Reformer style).

    Fixes: ``__init__`` declared several parameters with the same mangled
    name (SyntaxError), instance attributes were never stored on ``self``,
    ``__getstate__`` dropped the wrong key, and ``_tokenize`` passed an
    undefined ``out_type``.
    NOTE(review): the class attributes and method names were mechanically
    renamed in this file (``a__``/``_lowercase``) — confirm against the
    original tokenizer contract before use.
    """

    a__ = VOCAB_FILES_NAMES
    a__ = PRETRAINED_VOCAB_FILES_MAP
    a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ = ["""input_ids""", """attention_mask"""]

    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs=None , **kwargs ):
        # NOTE: the mutable [] default mirrors the upstream signature.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def _lowercase ( self ):
        """Vocabulary size reported by the SentencePiece model."""
        return self.sp_model.get_piece_size()

    def _lowercase ( self ):
        """Token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it here and
        # rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _lowercase ( self , text ):
        """Tokenize a string into sentence pieces."""
        return self.sp_model.encode(text , out_type=str )

    def _lowercase ( self , token ):
        """Convert a piece (str) into its vocabulary id."""
        return self.sp_model.piece_to_id(token )

    def _lowercase ( self , index ):
        """Convert a vocabulary id into its piece (str)."""
        # NOTE(review): ``token`` stays unbound for out-of-range ids, as in
        # the upstream implementation — confirm intended.
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token

    def _lowercase ( self , tokens ):
        """Join a sequence of pieces back into a single string."""
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def _lowercase ( self , save_directory , filename_prefix=None ):
        """Write the SentencePiece model file into ``save_directory``."""
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 88 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a__ ( ):
    """Parse the launcher CLI: --num_cores, the training script path, and
    everything after it (forwarded verbatim to the script).

    Fix: ``type=A_`` and ``nargs=A_`` referenced an undefined name inside
    this zero-argument function (NameError); restored to ``int``/``str``
    and ``REMAINDER``.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER )
    return parser.parse_args()
def a__ ( ):
    """Launch the parsed training script on TPU via torch_xla multiprocessing.

    Fix: the mangled version bound every value to a throwaway name and then
    read ``args``/``script_fpath``/``mod`` (NameError), and never patched
    ``sys.argv`` for the target script.
    NOTE(review): ``parse_args`` is bound to ``a__`` in this file's mangled
    form — confirm the name before running.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the target script sees its own args plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
    # NOTE(review): no function named ``main`` is defined in this file (the
    # entry point above is bound to ``a__``) — confirm before running.
    main()
| 88 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class UpperCAmelCase_ :
    """Translation feature: a dict mapping each of a fixed set of language
    codes to its text.

    Fix: every dataclass field had been mangled to ``a__ = 42``/``None``
    (losing the annotations the methods rely on) and ``field(init=_A,
    repr=_A)`` referenced an undefined name.
    """

    # Required: the fixed, per-feature set of language codes.
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""Translation""" , init=False , repr=False )

    def __call__( self ):
        """Arrow storage type: a struct with one string field per language."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def _lowercase ( self ):
        """Flatten into one Value("string") feature per language."""
        from .features import Value

        return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class UpperCAmelCase_ :
    """Translation feature allowing a variable number of languages per
    example, encoded as sorted parallel (language, translation) lists.

    Fix: dataclass fields were mangled to ``a__ = None`` (annotations
    lost), ``field(init=_A, repr=_A)`` referenced an undefined name, and
    the init hook discarded its results instead of storing them on self.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="""TranslationVariableLanguages""" , init=False , repr=False )

    def _lowercase ( self ):
        """Normalise ``languages`` to a sorted unique list and cache its size.
        NOTE(review): upstream this hook is ``__post_init__``; under the
        mangled name it is never invoked automatically — confirm."""
        self.languages = sorted(set(self.languages ) ) if self.languages else None
        self.num_languages = len(self.languages ) if self.languages else None

    def __call__( self ):
        """Arrow storage type: parallel lists of languages and translations."""
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )

    def _lowercase ( self , translation_dict ):
        """Validate one example and flatten it into sorted parallel lists."""
        lang_set = set(self.languages )
        if self.languages and set(translation_dict ) - lang_set:
            raise ValueError(
                F'''Some languages in example ({', '.join(sorted(set(translation_dict ) - lang_set ) )}) are not in valid set ({', '.join(lang_set )}).''' )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text , str ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples ) )
        return {"language": languages, "translation": translations}

    def _lowercase ( self ):
        """Flatten into Sequence features for the two parallel lists."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
| 88 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _A ):
    """Configuration for the Pegasus encoder-decoder model.

    Fix: ``__init__`` declared two dozen parameters all named
    ``UpperCamelCase__`` (SyntaxError); restored the upstream PegasusConfig
    parameter names matching the mangled defaults' order.
    """

    a__ = """pegasus"""
    a__ = ["""past_key_values"""]
    a__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__( self , vocab_size=5_0265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="""gelu""" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )

    @property
    def _lowercase ( self ):
        """Alias: number of attention heads (= encoder_attention_heads)."""
        return self.encoder_attention_heads

    @property
    def _lowercase ( self ):
        """Alias: hidden size (= d_model)."""
        return self.d_model
| 88 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public names it provides.
# Fix: the dict was bound to a throwaway name and the conditional branches
# rebound that name wholesale instead of extending the dict, so the final
# _LazyModule(...) call referenced the undefined ``_import_structure``.
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_funnel'] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_funnel'] = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # frameworks are only imported when their symbols are first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> 
exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their reference.

    Fixes vs. the previous revision: `_compute` declared every parameter with
    the same name (a SyntaxError), and both methods were named `_lowercase`
    (the second shadowed the first). Names are restored per the datasets.Metric
    API (`_info` / `_compute`) and the bodies' own references.
    """

    def _info(self):
        """Declare the metric's schema: string predictions and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": rate} with rate in [0.0, 100.0].

        Args:
            predictions: list of predicted strings.
            references: list of reference strings (same length).
            regexes_to_ignore: regex patterns stripped from both sides first.
            ignore_case: lowercase both sides before comparing.
            ignore_punctuation: remove punctuation characters before comparing.
            ignore_numbers: remove digit characters before comparing.
        """
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before any other option.
            for pattern in regexes_to_ignore:
                predictions = np.array([re.sub(pattern, "", x) for x in predictions])
                references = np.array([re.sub(pattern, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 88 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration for a timm-backed backbone wrapper.

    Fixes vs. the previous revision: `__init__` declared every parameter with
    the same name (a SyntaxError) and the `self.` targets of the assignments
    had been discarded; both are restored from the body's own references. The
    base class is the `PretrainedConfig` imported at the top of this module.
    """

    model_type = """timm_backbone"""

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        """Store backbone settings; extra kwargs go to PretrainedConfig.

        Args:
            backbone: timm model identifier to load (None means unset).
            num_channels: number of input image channels.
            features_only: expose intermediate feature maps only.
            use_pretrained_backbone: load pretrained timm weights.
            out_indices: which feature stages to return; defaults to (-1,).
        """
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # NOTE(review): the obfuscated original discarded this attribute name;
        # upstream stores the flag as `use_timm_backbone` — confirm.
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 88 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( state_dict ):
    """Remove fairseq-only entries from `state_dict` in place.

    These keys have no counterpart in the Hugging Face XGLM model, so they are
    dropped (silently, via pop(..., None)) before conversion. The previous
    revision referenced the undefined name `state_dict` and popped the dict
    itself as both key and default; the parameter name is restored.
    """
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def a__ ( emb ):
    """Build an nn.Linear that shares its weight with an nn.Embedding.

    Used to tie an LM head to the token embedding: the returned layer has no
    bias and reuses the embedding matrix as its weight. The previous revision
    passed the embedding object as every argument of `nn.Linear` (including
    `bias`); the intended sizes and `bias=False` are restored.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def a__ ( checkpoint_path ):
    """Load a fairseq XGLM checkpoint from disk and convert it to a Hugging
    Face `XGLMForCausalLM`.

    Args:
        checkpoint_path: path to the fairseq `model.pt` file.

    Returns:
        The converted XGLMForCausalLM with a tied lm_head.

    The previous revision's local bindings were obfuscated away; they are
    restored from the body's own references (`args`, `state_dict`, etc.).
    """
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    args = Namespace(**checkpoint["""cfg"""]["""model"""])
    state_dict = checkpoint["""model"""]
    # Drop fairseq-only keys that have no HF counterpart.
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    # fairseq names the stack "decoder"; HF names it "model".
    state_dict = {key.replace("""decoder""", """model"""): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="""gelu""",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    # strict=False: the output projection was removed above on purpose.
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    # NOTE(review): upstream assigns the tied head to `model.lm_head` — confirm.
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    # Command-line entry point: convert a fairseq XGLM checkpoint and save it
    # in Hugging Face format.
    # NOTE(review): the obfuscation renamed the bindings below to
    # `__lowerCAmelCase`, so `parser`, `args` and `model` referenced afterwards
    # are undefined at runtime, and `convert_fairseq_xglm_checkpoint_from_disk`
    # is defined above as `a__` — confirm against the upstream script.
    __lowerCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    __lowerCAmelCase : List[str] = parser.parse_args()
    __lowerCAmelCase : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 88 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class storing the hyperparameters of a GPT-NeoX model.

    Fixes vs. the previous revision: `__init__` declared every parameter with
    the same name (a SyntaxError) and the `self.` assignment targets were
    discarded; both are restored from the body's own references. The
    `rope_scaling` error message claimed fields `name`/`factor` (and repeated
    "with") while the code reads the key "type"; the message now matches.
    """

    model_type = """gpt_neox"""

    def __init__(
        self,
        vocab_size=5_0432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=2_4576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=1_0000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        """Store hyperparameters; token ids are forwarded to PretrainedConfig.

        Raises:
            ValueError: if hidden_size is not divisible by num_attention_heads,
                or if `rope_scaling` is malformed (see _rope_scaling_validation).
        """
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                """The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: {"type": "linear"|"dynamic", "factor": float > 1} or None."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with two fields, `type` and `factor`, """
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("""type""", None)
        rope_scaling_factor = self.rope_scaling.get("""factor""", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 88 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# SHA of the merge-base between main and HEAD, i.e. this branch's fork point.
__lowerCAmelCase : int = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
# NOTE(review): the obfuscation renamed every binding to `__lowerCAmelCase`, so
# `fork_point_sha`, `modified_files`, `joined_dirs`, `regex` and
# `relevant_modified_files` referenced below are undefined at runtime —
# confirm against the upstream utility script.
__lowerCAmelCase : Any = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
# Requested top-level sub-dirs, joined into a regex alternation.
__lowerCAmelCase : str = '|'.join(sys.argv[1:])
__lowerCAmelCase : Tuple = re.compile(RF'''^({joined_dirs}).*?\.py$''')
# Keep only the modified .py files living under one of the requested sub-dirs.
__lowerCAmelCase : Union[str, Any] = [x for x in modified_files if regex.match(x)]
# No trailing newline on purpose: the output feeds Makefile commands directly.
print(' '.join(relevant_modified_files), end='')
| 88 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a__ ( ):
    """Parse command-line arguments for the TPU multiprocessing launcher.

    Returns:
        argparse.Namespace with `num_cores` (int), `training_script` (str) and
        `training_script_args` (everything after the script path).

    The previous revision passed the undefined name `A_` as `type=` and
    `nargs=`; `int`, `str` and `argparse.REMAINDER` (imported at the top of
    this module) are restored.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""" )
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program: REMAINDER swallows all remaining argv.
    parser.add_argument("""training_script_args""", nargs=REMAINDER )
    return parser.parse_args()
def a__ ( ):
    '''simple docstring'''
    # Launcher entry point: import the training script as a module and spawn
    # it on the requested number of TPU cores via torch_xla.
    # NOTE(review): `parse_args` is not defined in this module (the argument
    # parser above is bound to `a__`), so this call raises NameError at
    # runtime — confirm the intended helper name against the upstream script.
    __magic_name__ = parse_args()
    # Import training_script as a module.
    __magic_name__ = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    __magic_name__ = script_fpath.stem
    __magic_name__ = importlib.import_module(A_ )
    # Patch sys.argv
    # NOTE(review): the obfuscation discarded the binding names, so `args`,
    # `script_fpath` and `mod` used here are undefined at runtime — confirm.
    __magic_name__ = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module (the function above is
    # named `a__`) — confirm.
    main()
| 88 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
    """Builds a tiny random Albert config plus inputs and runs shape checks for
    every Albert head class on behalf of the enclosing unittest case (`parent`).

    Fixes vs. the previous revision: every method signature repeated the same
    parameter name (a SyntaxError), the `self.` assignment targets were
    discarded, and every method was named `_lowercase` (so later definitions
    shadowed earlier ones). Names are restored from the bodies' own references
    and from the calls made by the test suite below
    (`prepare_config_and_inputs`, `create_and_check_*`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Draw random input tensors (plus optional mask/labels) and a config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return an Albert config small enough for fast CPU tests."""
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the bare encoder with and without mask/token-type ids; check shapes."""
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check MLM + sentence-order logits of the pretraining head."""
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check masked-LM logits shape."""
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check start/end logits of the QA head."""
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check sequence-classification logits shape."""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check token-classification logits shape."""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Tile the inputs per choice and check multiple-choice logits shape."""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model/pipeline test suite instantiated for the Albert family.

    Fixes vs. the previous revision: `_prepare_for_class` repeated the same
    parameter name (a SyntaxError), every test method was named `_lowercase`
    (so unittest never discovered them and later definitions shadowed earlier
    ones), the class attributes had all been renamed `a__`, and the mixin base
    classes were obfuscated to `_A`. Names are restored from the bodies and the
    imports at the top of this module.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": AlbertModel,
            """fill-mask""": AlbertForMaskedLM,
            """question-answering""": AlbertForQuestionAnswering,
            """text-classification""": AlbertForSequenceClassification,
            """token-classification""": AlbertForTokenClassification,
            """zero-shot""": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscation discarded this attribute name; upstream
    # marks the Albert suite torch.fx-traceable via `fx_compatible` — confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add the label tensors that AlbertForPreTraining additionally expects."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["""sentence_order_label"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        # NOTE(review): the tester class defined above in this file is also
        # named `UpperCAmelCase_` by the obfuscation; `AlbertModelTester`
        # matches the upstream binding — confirm.
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the tuple; switch its embedding type.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: run pretrained albert-base-v2 on a fixed sentence and
    compare a slice of the hidden states against recorded reference values.

    Fixes vs. the previous revision: the test method was named `_lowercase`,
    which unittest never discovers; the upstream name is restored.
    """

    @slow
    def test_inference_no_head(self):
        model = AlbertModel.from_pretrained("""albert-base-v2""")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        # Loose tolerance: values were recorded on a specific torch build.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 88 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__lowerCAmelCase : int = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
__lowerCAmelCase : Optional[int] = {'facebook/blenderbot_small-90M': 512}
def a__ ( word ):
    """Return the set of adjacent symbol pairs in `word` (a string or tuple of
    symbols), as used by the BPE merge loop.

    The previous revision's parameter was named `A_` while the body referenced
    the undefined name `word`; the parameter name is restored. The redundant
    re-wrapping of the result in `set(...)` was dropped.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( PreTrainedTokenizer ):
    """BPE tokenizer for BlenderbotSmall (facebook/blenderbot_small-90M).

    Fixes vs. the previous revision: `__init__` repeated the same parameter
    name (a SyntaxError), all four class attributes were renamed `a__` (only
    the last survived), the `self.` assignment targets were discarded, every
    method was named `_lowercase` although the code itself calls `self.bpe`,
    and the `sorted` key lambda's parameter did not match its body. Names are
    restored from the bodies and the PreTrainedTokenizer API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        """Load the vocab json and BPE merges from disk.

        Args:
            vocab_file: path to vocab.json (token -> id).
            merges_file: path to merges.txt (one merge rule per line,
                first line is a header).
        """
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="""utf-8""") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="""utf-8""") as merges_handle:
            # First line is a version header; last entry is an empty trailer.
            merges = merges_handle.read().split("""\n""")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # Earlier merge rules have lower rank and are applied first.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocab including tokens added after loading."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BlenderbotSmall's pre-tokenization and BPE merges to `token`.

        Returns the space-joined BPE pieces (inner pieces suffixed with "@@").
        """
        if token in self.cache:
            return self.cache[token]
        # Pre-tokenization: split off punctuation and apostrophes, collapse
        # runs of whitespace, and encode newlines as the __newln__ token.
        token = re.sub("""([.,!?()])""", R""" \1""", token)
        token = re.sub("""(')""", R""" \1 """, token)
        token = re.sub(R"""\s{2,}""", """ """, token)
        if "\n" in token:
            token = token.replace("""\n""", """ __newln__""")

        tokens = token.split(""" """)
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            # Mark the last character with the end-of-word suffix.
            word = tuple(list(word[:-1]) + [word[-1] + """</w>"""])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Greedily merge the lowest-ranked (earliest-learned) pair.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("""inf""")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            # Join pieces with the continuation marker and drop the trailing
            # "</w>" (4 characters) from the final piece.
            word = """@@ """.join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str):
        """Split `text` on whitespace and BPE-encode each chunk."""
        split_tokens = []
        # \S+\n? keeps a trailing newline attached so bpe() can map it.
        words = re.findall(R"""\S+\n?""", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(""" """)))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token string to its id (unk id when unknown)."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token string (unk token when unknown)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        """Undo BPE: join tokens and strip the "@@ " continuation markers."""
        out_string = """ """.join(tokens).replace("""@@ """, """""").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`.

        Returns:
            (vocab_file_path, merges_file_path), or None when the directory
            does not exist (an error is logged instead of raised).
        """
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        merge_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""])

        with open(vocab_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + """\n""")

        index = 0
        with open(merge_file, """w""", encoding="""utf-8""") as writer:
            writer.write("""#version: 0.2\n""")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # Ranks should be 0..N-1 in order; a gap means corruption.
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens) + """\n""")
                index += 1
        return vocab_file, merge_file
| 88 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class storing the hyperparameters of a BioGPT model.

    Fixes vs. the previous revision: `__init__` declared every parameter with
    the same name (a SyntaxError) and the `self.` assignment targets were
    discarded; both are restored from the body's own references. The base
    class is the `PretrainedConfig` imported at the top of this module.
    """

    model_type = """biogpt"""

    def __init__(
        self,
        vocab_size=4_2384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store hyperparameters; token ids and extra kwargs go to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 88 | 1 |
from maths.prime_check import is_prime
def a__ ( A_ ):
    """Return the twin prime of ``A_`` (i.e. ``A_ + 2``) if both ``A_`` and
    ``A_ + 2`` are prime, otherwise ``-1``.

    Raises:
        TypeError: if ``A_`` is not an integer.
    """
    # Bug fix: the original tested `isinstance(A_, A_)` (arg 2 must be a type),
    # referenced an undefined `number` inside the f-string, and raised
    # `TypeError(A_)`, discarding the message it had just built.
    if not isinstance(A_, int ):
        __magic_name__ = f'''Input value of [number={A_}] must be an integer'''
        raise TypeError(__magic_name__ )
    if is_prime(A_ ) and is_prime(A_ + 2 ):
        return A_ + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowerCAmelCase : Any = get_logger(__name__)
class UpperCAmelCase_ :
    '''Manages extraction of compressed archives into a hashed path under a
    cache directory.

    NOTE(review): identifiers in this class look machine-mangled — some methods
    duplicate a parameter name (a SyntaxError) and the constructor binds both
    values to one local, so ``self.extract_dir`` / ``self.extractor`` (read by
    the methods below) are never assigned. Needs restoration from upstream.
    '''

    def __init__( self : List[Any] , UpperCamelCase__ : Optional[str] = None ) -> Optional[Any]:
        """Remember the extraction cache directory and the extractor registry."""
        # NOTE(review): `cache_dir` is unbound here (the parameter is
        # `UpperCamelCase__`), and both results rebind the same local.
        __magic_name__ = (
            os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        __magic_name__ = Extractor

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> str:
        """Return the output path for an input archive: cache dir + hash of the original path."""
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        __magic_name__ = os.path.abspath(UpperCamelCase__ )
        return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) )

    def _lowercase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : bool ) -> bool:
        """Decide whether to (re-)extract: forced, or output file/dir missing or empty."""
        return force_extract or (
            not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ ))
        )

    def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> str:
        """Extract the archive if a format can be inferred; otherwise return the input path unchanged."""
        __magic_name__ = self.extractor.infer_extractor_format(UpperCamelCase__ )
        if not extractor_format:
            return input_path
        __magic_name__ = self._get_output_path(UpperCamelCase__ )
        if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ):
            self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        return output_path
class UpperCAmelCase_ ( _A ):
    '''Abstract extractor interface: a probe classmethod and an extract staticmethod.

    NOTE(review): both stubs share the mangled name `_lowercase`, so the second
    definition shadows the first; the first stub also reuses one name for its
    positional and ``**`` parameters, which is a SyntaxError as written.
    '''

    @classmethod
    @abstractmethod
    def _lowercase ( cls : List[str] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : Union[str, Any] ) -> bool:
        """Return True if this extractor can handle the given path."""
        ...

    @staticmethod
    @abstractmethod
    def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
        """Extract the archive at the first path into the second path."""
        ...
class UpperCAmelCase_ ( _A , _A ):
    '''Extractor base that recognizes its format by a leading magic number.'''

    # Subclasses fill this with candidate magic-number byte prefixes.
    a__ = []

    @staticmethod
    def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> List[str]:
        """Read the first N bytes of the file.

        NOTE(review): both parameters share one name — a SyntaxError as written.
        """
        with open(UpperCamelCase__ , """rb""" ) as f:
            return f.read(UpperCamelCase__ )

    @classmethod
    def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool:
        """Return True if the file starts with any of the class's magic numbers."""
        if not magic_number:
            # NOTE(review): `magic_number` is unbound under these mangled names
            # and both results below rebind the same throwaway local.
            __magic_name__ = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers )
            try:
                __magic_name__ = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
            except OSError:
                return False
        return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers )
class UpperCAmelCase_ ( _A ):
    '''Tar archive extractor with path-traversal protection.

    NOTE(review): parameter/local names here look machine-mangled (several
    methods duplicate a parameter name — a SyntaxError — and results are bound
    to `__magic_name__` then read under other names).
    '''

    @classmethod
    def _lowercase ( cls : Optional[Any] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : int ) -> bool:
        """Return True if the path is a tar archive (delegates to `tarfile`)."""
        return tarfile.is_tarfile(UpperCamelCase__ )

    @staticmethod
    def _lowercase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]:
        """Yield only archive members that resolve inside the destination.

        Blocks absolute/`..` member paths as well as symlinks and hard links
        pointing outside the target (tar path-traversal mitigation); blocked
        members are logged and skipped rather than extracted.
        """
        def resolved(UpperCamelCase__ : str ) -> str:
            return os.path.realpath(os.path.abspath(UpperCamelCase__ ) )

        def badpath(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ )

        def badlink(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            __magic_name__ = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=UpperCamelCase__ )

        __magic_name__ = resolved(UpperCamelCase__ )
        for finfo in members:
            if badpath(finfo.name , UpperCamelCase__ ):
                logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo

    @staticmethod
    def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
        """Extract the tar archive into the output directory, filtering members through safemembers."""
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        __magic_name__ = tarfile.open(UpperCamelCase__ )
        tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) )
        tar_file.close()
class UpperCAmelCase_ ( _A ):
    """Extractor for gzip-compressed files (magic number ``1F 8B``)."""

    a__ = [B"""\x1F\x8B"""]

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Decompress `input_path` with gzip and write the raw bytes to `output_path`.

        Bug fix: both parameters previously shared one name (a SyntaxError), so
        the method could not be defined and both opens targeted the same path;
        they are now distinct input/output paths.
        """
        with gzip.open(input_path , """rb""" ) as gzip_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class UpperCAmelCase_ ( _A ):
    '''Zip archive extractor.

    NOTE(review): methods below duplicate a parameter name (a SyntaxError as
    written) and bind results to `__magic_name__` while reading other names —
    machine-mangled identifiers.
    '''

    # Magic numbers for regular, empty, and spanned zip archives.
    a__ = [
        B"""PK\x03\x04""",
        B"""PK\x05\x06""", # empty archive
        B"""PK\x07\x08""", # spanned archive
    ]

    @classmethod
    def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool:
        """Return True for zip archives; falls back to a stricter end-of-central-directory
        probe to reduce false positives (at the cost of missing executable zips)."""
        if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(UpperCamelCase__ , """rb""" ) as fp:
                __magic_name__ = _EndRecData(UpperCamelCase__ )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            __magic_name__ = fp.read(UpperCamelCase__ ) # CD is where we expect it to be
                            if len(UpperCamelCase__ ) == sizeCentralDir:
                                __magic_name__ = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True # First central directory entry has correct magic number
            return False
        except Exception: # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
        """Extract all members of the zip archive into the output directory."""
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file:
            zip_file.extractall(UpperCamelCase__ )
            zip_file.close()
class UpperCAmelCase_ ( _A ):
    """Extractor for xz/LZMA-compressed files."""

    a__ = [B"""\xFD\x37\x7A\x58\x5A\x00"""]

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Decompress `input_path` with lzma and write the raw bytes to `output_path`.

        Bug fix: both parameters previously shared one name (a SyntaxError);
        they are now distinct input/output paths.
        """
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class UpperCAmelCase_ ( _A ):
    """Extractor for RAR archives (requires the third-party `rarfile` package)."""

    a__ = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Extract the RAR archive into the output directory.

        Bug fixes: the two parameters previously shared one name (a
        SyntaxError), and `exist_ok` was passed the path itself instead of the
        boolean flag.
        """
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class UpperCAmelCase_ ( _A ):
    """Extractor for Zstandard-compressed files (requires `zstandard`)."""

    a__ = [B"""\x28\xb5\x2F\xFD"""]

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Stream-decompress `input_path` into `output_path`.

        Bug fix: both parameters previously shared one name (a SyntaxError);
        they are now distinct input/output paths.
        """
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class UpperCAmelCase_ ( _A ):
    """Extractor for bzip2-compressed files (magic number ``42 5A 68``)."""

    a__ = [B"""\x42\x5A\x68"""]

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Decompress `input_path` with bzip2 and write the raw bytes to `output_path`.

        Bug fixes: the two parameters previously shared one name (a
        SyntaxError), and the body called `bza.open` — `bza` is not a real
        module; bzip2 support lives in the stdlib `bz2` module.
        """
        import bz2

        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class UpperCAmelCase_ ( _A ):
    """Extractor for 7z archives (requires the third-party `py7zr` package)."""

    a__ = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Extract the 7z archive into the output directory.

        Bug fixes: the two parameters previously shared one name (a
        SyntaxError); the module was imported as `pyazr`, which does not
        exist — the `config.PY7ZR_AVAILABLE` guard shows `py7zr` was intended;
        and `exist_ok` was passed the path instead of the boolean flag.
        """
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )
class UpperCAmelCase_ ( _A ):
    """Extractor for LZ4-framed files (requires the third-party `lz4` package)."""

    a__ = [B"""\x04\x22\x4D\x18"""]

    @staticmethod
    def _lowercase ( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Decompress `input_path` with lz4 and write the raw bytes to `output_path`.

        Bug fixes: the two parameters previously shared one name (a
        SyntaxError), and the module was imported as `lza.frame`, which does
        not exist — the `config.LZ4_AVAILABLE` guard shows `lz4.frame` was
        intended.
        """
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame

        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class UpperCAmelCase_ :
    '''Facade dispatching archive extraction to the registered extractor class
    for a given format.

    NOTE(review): identifiers look machine-mangled — some methods duplicate a
    parameter name (a SyntaxError as written) and results bound to
    `__magic_name__` are read back under other names.
    '''

    # Registry: format name -> extractor class.
    a__ = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor, # <Added version="2.4.0"/>
        "lz4": LzaExtractor, # <Added version="2.4.0"/>
    }

    @classmethod
    def _lowercase ( cls : Tuple ) -> Tuple:
        """Return the longest magic-number length among the registered extractors."""
        return max(
            len(UpperCamelCase__ )
            for extractor in cls.extractors.values()
            if issubclass(UpperCamelCase__ , UpperCamelCase__ )
            for extractor_magic_number in extractor.magic_numbers )

    @staticmethod
    def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> Union[str, Any]:
        """Read the file's magic-number prefix, or b"" if the file is unreadable."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ )
        except OSError:
            return b""

    @classmethod
    def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bool = False ) -> bool:
        """Deprecated probe; kept for backward compatibility with `infer_extractor_format`."""
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , )
        __magic_name__ = cls.infer_extractor_format(UpperCamelCase__ )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def _lowercase ( cls : Dict , UpperCamelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
        """Infer the archive format name from the file's magic number, if any."""
        __magic_name__ = cls._get_magic_number_max_length()
        __magic_name__ = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
                return extractor_format

    @classmethod
    def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None:
        """Extract the archive under a file lock; the `extractor` argument is
        deprecated in favor of `extractor_format`."""
        os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ )
        # Prevent parallel extractions
        __magic_name__ = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) )
        with FileLock(UpperCamelCase__ ):
            shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=UpperCamelCase__ , )
                    __magic_name__ = extractor if extractor != """deprecated""" else extractor_format
                else:
                    __magic_name__ = cls.extractors[extractor_format]
                return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=UpperCamelCase__ , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(UpperCamelCase__ ):
                        return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
| 88 | 1 |
__lowerCAmelCase : Tuple = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def a__ ( A_ ):
'''simple docstring'''
if not isinstance(A_, A_ ):
__magic_name__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A_ )
__magic_name__ = """""".join(bin(A_ )[2:].zfill(8 ) for byte in data )
__magic_name__ = len(A_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
__magic_name__ = b"""=""" * ((6 - len(A_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A_ ) % 6)
else:
__magic_name__ = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(A_ ), 6 ) ).encode()
+ padding
)
# Bug fix: the module's charset constant lost its name in a refactor; bind it
# here so this function is self-contained.
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def a__ ( A_ ):
    """Decode base64 data ``A_`` (bytes or ASCII str) back to bytes (mirrors
    `base64.b64decode`).

    Raises:
        TypeError: if ``A_`` is neither bytes nor str.
        ValueError: if bytes input is not valid ASCII/UTF-8.
        AssertionError: on invalid base64 characters or incorrect padding.
    """
    if not isinstance(A_, bytes ) and not isinstance(A_, str ):
        # Bug fix: this message was previously built but never raised.
        __magic_name__ = (
            """argument should be a bytes-like object or ASCII string, """
            f'''not \'{A_.__class__.__name__}\''''
        )
        raise TypeError(__magic_name__ )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(A_, bytes ):
        try:
            A_ = A_.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    padding = A_.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in A_[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in A_ ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(A_ ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one, and drop the matching zero bits.
        A_ = A_[:-padding]
        binary_stream = """""".join(
            bin(B64_CHARSET.index(A_char ) )[2:].zfill(6 ) for A_char in A_ )[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(B64_CHARSET.index(A_char ) )[2:].zfill(6 ) for A_char in A_ )
    decoded = [
        int(binary_stream[index : index + 8], 2 )
        for index in range(0, len(binary_stream ), 8 )
    ]
    return bytes(decoded )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps submodule name -> public names it defines.
# Bug fix: this dict (and the torch-only additions below) were previously
# bound to throwaway mangled names while `_import_structure`, consumed by
# `_LazyModule` at the bottom, was never defined — a guaranteed NameError.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes.
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
# Emit conversion progress at INFO level and grab a module-level logger.
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _a ( a :int , a :Dict=False ) -> Optional[Any]:
    """Map original SegFormer state-dict keys to HF `transformers` naming.

    NOTE(review): the two parameters share the name `a` (a SyntaxError as
    written), and the body reads `state_dict` / `encoder_only` / `idx` /
    `new_state_dict`, which are never bound under these mangled names —
    needs restoration from the upstream conversion script.
    """
    a = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('''head''' ):
            a = '''segformer.encoder.''' + key
        if key.startswith('''backbone''' ):
            a = key.replace('''backbone''' , '''segformer.encoder''' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            a = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
            a = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(a )-1}""" )
        if "norm" in key:
            a = key.replace('''norm''' , '''layer_norm''' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            a = key[key.find('''segformer.encoder.layer_norm''' ) + len('''segformer.encoder.layer_norm''' )]
            a = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(a )-1}""" )
        if "layer_norm1" in key:
            a = key.replace('''layer_norm1''' , '''layer_norm_1''' )
        if "layer_norm2" in key:
            a = key.replace('''layer_norm2''' , '''layer_norm_2''' )
        if "block" in key:
            # replace for example block1 by block.0
            a = key[key.find('''block''' ) + len('''block''' )]
            a = key.replace(F"""block{idx}""" , F"""block.{int(a )-1}""" )
        if "attn.q" in key:
            a = key.replace('''attn.q''' , '''attention.self.query''' )
        if "attn.proj" in key:
            a = key.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in key:
            a = key.replace('''attn''' , '''attention.self''' )
        if "fc1" in key:
            a = key.replace('''fc1''' , '''dense1''' )
        if "fc2" in key:
            a = key.replace('''fc2''' , '''dense2''' )
        if "linear_pred" in key:
            a = key.replace('''linear_pred''' , '''classifier''' )
        if "linear_fuse" in key:
            a = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
            a = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            a = key[key.find('''linear_c''' ) + len('''linear_c''' )]
            a = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(a )-1}""" )
        if key.startswith('''head''' ):
            a = key.replace('''head''' , '''classifier''' )
        a = value
    return new_state_dict
def _a ( a :List[str] , a :Union[str, Any] ) -> Tuple:
    """Split each encoder block's fused key/value ("kv") projection into
    separate key and value weight/bias entries of the state dict (in place).

    NOTE(review): the parameters share the name `a` (a SyntaxError as written)
    and the body reads `config` / `state_dict` / `kv_weight` / `kv_bias`,
    which are never bound under these mangled names.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            a = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            a = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            a = kv_weight[
                : config.hidden_sizes[i], :
            ]
            a = kv_bias[: config.hidden_sizes[i]]
            a = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            a = kv_bias[
                config.hidden_sizes[i] :
            ]
def _a ( ) -> Optional[Any]:
    """Download the standard COCO sanity-check image used to verify conversions.

    Network I/O: fetches the image over HTTP on every call.
    """
    a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # Bug fix: `stream` was previously passed the URL string itself; it is the
    # boolean flag telling `requests` not to download the body eagerly so that
    # `.raw` can be handed to PIL.
    return Image.open(requests.get(a , stream=True ).raw )
@torch.no_grad()
def _a ( a :Tuple , a :Any , a :int ) -> Any:
    """Convert an original SegFormer/MiT checkpoint to the HF format: build the
    config from the model name, rename/split state-dict keys, run a forward
    pass on the sanity image, compare logits to known-good slices, and save
    the model plus image processor.

    NOTE(review): the three parameters share the name `a` (a SyntaxError as
    written) and the body reads many names (`model_name`, `size`, `idalabel`,
    `encoder_only`, `state_dict`, `model`, `logits`, `expected_shape`,
    `pytorch_dump_folder_path`, ...) that are never bound under these mangled
    identifiers — needs restoration from the upstream conversion script.
    """
    a = SegformerConfig()
    a = False
    # set attributes based on model_name
    a = '''huggingface/label-files'''
    if "segformer" in model_name:
        a = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
        if "ade" in model_name:
            a = 150
            a = '''ade20k-id2label.json'''
            a = (1, 150, 128, 128)
        elif "city" in model_name:
            a = 19
            a = '''cityscapes-id2label.json'''
            a = (1, 19, 128, 128)
        else:
            raise ValueError(F"""Model {model_name} not supported""" )
    elif "mit" in model_name:
        a = True
        a = model_name[4:6]
        a = 1_000
        a = '''imagenet-1k-id2label.json'''
        a = (1, 1_000)
    else:
        raise ValueError(F"""Model {model_name} not supported""" )
    # set config attributes
    a = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
    a = {int(a ): v for k, v in idalabel.items()}
    a = idalabel
    a = {v: k for k, v in idalabel.items()}
    if size == "b0":
        pass
    elif size == "b1":
        a = [64, 128, 320, 512]
        a = 256
    elif size == "b2":
        a = [64, 128, 320, 512]
        a = 768
        a = [3, 4, 6, 3]
    elif size == "b3":
        a = [64, 128, 320, 512]
        a = 768
        a = [3, 4, 18, 3]
    elif size == "b4":
        a = [64, 128, 320, 512]
        a = 768
        a = [3, 8, 27, 3]
    elif size == "b5":
        a = [64, 128, 320, 512]
        a = 768
        a = [3, 6, 40, 3]
    else:
        raise ValueError(F"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    # NOTE(review): `keep_ratio` / `align` / `do_random_crop` are being passed
    # `a` rather than boolean literals — mangled arguments.
    a = SegformerImageProcessor(
        image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
    # prepare image
    a = prepare_img()
    a = image_processor(images=a , return_tensors='''pt''' ).pixel_values
    logger.info(F"""Converting model {model_name}...""" )
    # load original state dict
    if encoder_only:
        a = torch.load(a , map_location=torch.device('''cpu''' ) )
    else:
        a = torch.load(a , map_location=torch.device('''cpu''' ) )['''state_dict''']
    # rename keys
    a = rename_keys(a , encoder_only=a )
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(a , a )
    # create HuggingFace model and load state dict
    if encoder_only:
        a = False
        a = SegformerForImageClassification(a )
    else:
        a = SegformerForSemanticSegmentation(a )
    model.load_state_dict(a )
    model.eval()
    # forward pass
    a = model(a )
    a = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
    if model_name == "segformer.b0.512x512.ade.160k":
        a = torch.tensor(
            [
                [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
                [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
                [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
            ] )
    elif model_name == "segformer.b1.512x512.ade.160k":
        a = torch.tensor(
            [
                [[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
                [[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
                [[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
            ] )
    elif model_name == "segformer.b2.512x512.ade.160k":
        a = torch.tensor(
            [
                [[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
                [[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
                [[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
            ] )
    elif model_name == "segformer.b3.512x512.ade.160k":
        a = torch.tensor(
            [
                [[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
                [[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
                [[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
            ] )
    elif model_name == "segformer.b4.512x512.ade.160k":
        a = torch.tensor(
            [
                [[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
                [[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
                [[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
            ] )
    elif model_name == "segformer.b5.640x640.ade.160k":
        a = torch.tensor(
            [
                [[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
                [[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
                [[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
            ] )
    # Cityscapes checkpoints
    elif model_name == "segformer.b0.1024x1024.city.160k":
        a = torch.tensor(
            [
                [[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
                [[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
                [[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
            ] )
    elif model_name == "segformer.b0.512x1024.city.160k":
        a = torch.tensor(
            [
                [[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
                [[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
                [[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
            ] )
    elif model_name == "segformer.b0.640x1280.city.160k":
        a = torch.tensor(
            [
                [
                    [-1.1372e01, -1.2787e01, -1.3477e01],
                    [-1.2536e01, -1.4194e01, -1.4409e01],
                    [-1.3217e01, -1.4888e01, -1.5327e01],
                ],
                [
                    [-1.4791e01, -1.7122e01, -1.8277e01],
                    [-1.7163e01, -1.9192e01, -1.9533e01],
                    [-1.7897e01, -1.9991e01, -2.0315e01],
                ],
                [
                    [7.6723e-01, 4.1921e-01, -7.7878e-02],
                    [4.7772e-01, 9.5557e-03, -2.8082e-01],
                    [3.6032e-01, -2.4826e-01, -5.1168e-01],
                ],
            ] )
    elif model_name == "segformer.b0.768x768.city.160k":
        a = torch.tensor(
            [
                [[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
                [[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
                [[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
            ] )
    elif model_name == "segformer.b1.1024x1024.city.160k":
        a = torch.tensor(
            [
                [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
                [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
                [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
            ] )
    elif model_name == "segformer.b2.1024x1024.city.160k":
        a = torch.tensor(
            [
                [[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
                [[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
                [[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
            ] )
    elif model_name == "segformer.b3.1024x1024.city.160k":
        a = torch.tensor(
            [
                [[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
                [[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
                [[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
            ] )
    elif model_name == "segformer.b4.1024x1024.city.160k":
        a = torch.tensor(
            [
                [[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
                [[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
                [[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
            ] )
    elif model_name == "segformer.b5.1024x1024.city.160k":
        a = torch.tensor(
            [
                [[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
                [[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
                [[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
            ] )
    else:
        a = logits.argmax(-1 ).item()
        print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , a , atol=1e-2 )
    # finally, save model and image processor
    logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: maps submodule name -> public names it defines.
# Bug fix: this dict (and the framework-specific additions below) were
# previously bound to throwaway mangled names while `_import_structure`,
# consumed by `_LazyModule` at the bottom, was never defined — a NameError.
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch is available: expose the torch modeling classes.
    _import_structure['modeling_xlm'] = [
        'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMForMultipleChoice',
        'XLMForQuestionAnswering',
        'XLMForQuestionAnsweringSimple',
        'XLMForSequenceClassification',
        'XLMForTokenClassification',
        'XLMModel',
        'XLMPreTrainedModel',
        'XLMWithLMHeadModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow is available: expose the TF modeling classes.
    _import_structure['modeling_tf_xlm'] = [
        'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLMForMultipleChoice',
        'TFXLMForQuestionAnsweringSimple',
        'TFXLMForSequenceClassification',
        'TFXLMForTokenClassification',
        'TFXLMMainLayer',
        'TFXLMModel',
        'TFXLMPreTrainedModel',
        'TFXLMWithLMHeadModel',
    ]
if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
'''simple docstring'''
import numpy as np
# 5x5 Polybius square used by the cipher class below; note 'j' is absent
# ('i' and 'j' share a cell, the classic 25-letter layout).
SCREAMING_SNAKE_CASE_: Dict =[
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]
class __A:
    """Bifid cipher over a 5x5 Polybius square ('j' is folded into 'i')."""

    def __init__(self) -> None:
        # Keep a local copy of the square so the class is self-contained.
        SQUARE = [
            ['a', 'b', 'c', 'd', 'e'],
            ['f', 'g', 'h', 'i', 'k'],
            ['l', 'm', 'n', 'o', 'p'],
            ['q', 'r', 's', 't', 'u'],
            ['v', 'w', 'x', 'y', 'z'],
        ]
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str):
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        indexa, indexa2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([indexa + 1, indexa2 + 1])
        return indexes

    def numbers_to_letter(self, indexa: int, indexa2: int) -> str:
        """Return the letter at 1-based row `indexa`, column `indexa2`."""
        return self.SQUARE[indexa - 1, indexa2 - 1]

    def encode(self, message: str) -> str:
        """Encode `message` with the Bifid cipher (spaces dropped, 'j' -> 'i')."""
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        # First step: write each letter's row above its column.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Second step: read the grid row-major (all rows, then all columns)
        # and re-pair consecutive digits into letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            indexa = int(second_step[numbers_index * 2])
            indexa2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(indexa, indexa2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a Bifid-encoded `message` (inverse of `encode`)."""
        message = message.lower()
        message = message.replace(' ', '')

        # Rebuild the flat digit stream from the encoded letters' coordinates.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Fold the stream back into a (rows, columns) grid and read it off
        # column by column to recover the plaintext.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            indexa = int(second_step[0, numbers_index])
            indexa2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(indexa, indexa2)
            decoded_message = decoded_message + letter
        return decoded_message
| 1 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for `text2text-generation` (encoder-decoder models)."""

    # Mappings the shared pipeline-test mixin uses to pick tiny models.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline under test plus example inputs for the generic runner."""
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        """Generic checks run against every tiny model in the mappings."""
        outputs = generator("""Something there""")
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there"""))

        outputs = generator(["""This is great !""", """Something else"""], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )

        outputs = generator(
            ["""This is great !""", """Something else"""], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )

        # Non-string input must be rejected.
        with self.assertRaises(TypeError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        """Deterministic checks against a tiny random T5 on the PyTorch backend."""
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""pt""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])

        num_return_sequences = 3
        outputs = generator(
            """Something there""",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("""This is a test""", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"""generated_token_ids""": ANY(torch.Tensor)},
                {"""generated_token_ids""": ANY(torch.Tensor)},
            ],
        )

        # Give the tokenizer a pad token so batched generation works.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        """Deterministic check against a tiny random T5 on the TensorFlow backend."""
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""tf""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])
| 88 | 0 |
'''Utility constants for checking that the two halves of a lazy `__init__.py`
(the `_import_structure` dict and the `TYPE_CHECKING` imports) stay in sync.'''
import collections
import os
import re
from pathlib import Path

# Root of the package whose init files are inspected.
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(R'^\s*try:')
# Catches a line with else:
_re_else = re.compile(R'^\s*else:')
def find_backend(line):
    """Return the backend(s) guarding `line` ("torch", "tf_and_torch", ...) or None.

    A line counts as a backend guard when it matches `if not is_xxx_available()`;
    multiple backends are sorted and joined with "_and_" for a stable key.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Parse one `__init__.py` and return its two halves, or None if it is not lazy.

    Returns a pair of dicts `(import_dict_objects, type_hint_objects)` mapping a
    backend name ("none" for unguarded objects) to the list of object names
    declared in, respectively, the `_import_structure` dict and the
    `TYPE_CHECKING` branch.
    """
    with open(init_file, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('''_import_structure = {'''):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(R'''\[([^\]]+)\]''', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(''' ''' * 8 + '''"'''):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'''none''': objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING'''):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(''', ''')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(''' ''' * 8 + '''"'''):
                    objects.append(line[9:-3])
                elif line.startswith(''' ''' * 12 + '''"'''):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('''else''')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', '''))
        elif line.startswith(''' ''' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'''none''': objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(''' ''' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings.

    Both arguments map backend name -> list of object names (see `parse_init`).
    An empty list means the halves agree.
    """

    def find_duplicates(seq):
        # Names declared more than once in the same half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = '''base imports''' if key == '''none''' else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the package tree and raise ValueError if any lazy init is inconsistent."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '''__init__.py''')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file name.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append('''\n'''.join(errors))
    if len(failures) > 0:
        raise ValueError('''\n\n'''.join(failures))
def get_transformers_submodules():
    """Return the dotted names of all submodules found on disk under the package root."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_'''):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('''*.py'''))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '''.''')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('''.py''', '''''').replace(os.path.sep, '''.''')
            # Only keep top-level .py files here; nested ones are covered by folders.
            if len(submodule.split('''.''')) == 1:
                submodules.append(submodule)
    return submodules
# Submodules deliberately absent from the main `_import_structure`.
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
    'models.esm.openfold_utils',
]
def check_submodules():
    """Raise ValueError if an on-disk submodule is missing from the main init."""
    # Import the repo copy of transformers (not an installed one).
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''), '''r''') as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''', init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '''\n'''.join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            '''The following submodules are not properly registed in the main init of Transformers:\n'''
            f"{list_of_modules}\n"
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''')
if __name__ == "__main__":
    # Run both init-consistency checks when invoked as a script.
    check_all_inits()
    check_submodules()
| 2 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Largest per-device batch size before falling back to gradient accumulation,
# and the (larger) batch size used for evaluation/test dataloaders.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Create train/validation/test DataLoaders for one cross-validation fold.

    Args:
        accelerator: the `Accelerator`, used to synchronise preprocessing.
        dataset: the full GLUE/MRPC `DatasetDict`; its "train" split is carved
            up by the fold indices and its "validation" split becomes "test".
        train_idxs: row indices of this fold's training subset.
        valid_idxs: row indices of this fold's validation subset.
        batch_size: training batch size.
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs),
            """validation""": dataset["""train"""].select(valid_idxs),
            """test""": dataset["""validation"""],
        })

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """Train/evaluate on GLUE MRPC with stratified k-fold cross validation.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI args providing `cpu`, `mixed_precision` and `num_folds`.
    """
    test_references = []
    # Download the dataset
    datasets = load_dataset("""glue""", """mrpc""")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows), datasets["""train"""]["""label"""])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs, )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
                metric.add_batch(
                    predictions=predictions, references=references, )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("""Average test metrics from all folds:""", test_metric)
def main():
    """Parse command-line arguments and launch cross-validation training."""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    # New Code #
    parser.add_argument("""--num_folds""", type=int, default=3, help="""The number of splits to perform across the dataset""")
    args = parser.parse_args()
    # Fixed hyper-parameters for the example run.
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
    # Entry point when executed as a script.
    main()
| 88 | 0 |
'''simple docstring'''
from collections import deque
def tarjan(g):
    """Return the strongly connected components of directed graph `g`.

    `g` is an adjacency list: g[v] is the list of successors of vertex v.
    Components are returned as lists of vertices, in the order Tarjan's
    algorithm completes them (reverse topological order of the condensation).
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        # v is the root of a component: pop the whole component off the stack.
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components
def create_graph(n, edges):
    """Build an adjacency list for `n` vertices from (u, v) directed edge pairs."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Self-test on a small graph with one 4-vertex cycle and three singletons.
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 3 |
def is_arithmetic_series(series):
    """Return True if `series` (a non-empty list) has a constant common difference.

    Raises:
        ValueError: if `series` is not a list, or is empty.
    """
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""")
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""")
    # A single element is trivially arithmetic.
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series):
    """Return the arithmetic mean of a non-empty list of numbers.

    Raises:
        ValueError: if `series` is not a list, or is empty.
    """
    if not isinstance(series, list):
        raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""")
    if len(series) == 0:
        raise ValueError("""Input list must be a non empty list""")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest

    doctest.testmod()
| 88 | 0 |
'''Conversion of energy units (joule-based factors).'''

# Factor to convert each unit TO joules; conversion between any two units is
# value * factor[from] / factor[to].
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.05_585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type, to_type, value):
    """Convert `value` from one energy unit to another.

    Raises:
        ValueError: if either unit name is not a key of ENERGY_CONVERSION.
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'''
            f'''Valid values are: {', '.join(ENERGY_CONVERSION)}'''
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 4 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
    '''Output container for the decoder below.

    NOTE(review): `a__ = 42` looks like an obfuscated field whose original
    annotation was lost (presumably `sample: torch.FloatTensor` given the
    BaseOutput-style base class `_A`) — confirm against the original source.
    '''

    a__ = 42
class UpperCAmelCase_ ( nn.Module ):
    """VAE-style convolutional encoder: conv_in -> down blocks -> mid block -> conv_out."""

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1, )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1E-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1], resnet_eps=1E-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="""default""", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None, )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1E-6)
        self.conv_act = nn.SiLU()

        # Latent output is doubled when predicting mean and log-variance.
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode input tensor `x`; uses gradient checkpointing when enabled in training."""
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(""">=""", """1.11.0"""):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
# NOTE(review): VAE-style Decoder module (conv_in -> mid block -> up blocks ->
# norm/act/conv_out).  The text is machine-mangled: `__init__` repeats the
# parameter name `UpperCamelCase__` (duplicate argument names are a
# SyntaaxError-free claim? No -- they are a SyntaxError in Python), every
# assignment targets a single local `__magic_name__` instead of the
# `self.<attr>` the later reads (`self.layers_per_block`, `self.up_blocks`,
# `self.mid_block`, ...) depend on, and `nn.Convad`/`UNetMidBlockaD` look like
# mangled `nn.Conv2d`/`UNetMidBlock2D`.  Restore from upstream.
class UpperCAmelCase_ ( nn.Module ):
    '''simple docstring'''
    # Builds conv_in, the mid block, a reversed stack of up blocks, and the
    # output head; `norm_type == "spatial"` selects SpatialNorm over GroupNorm.
    def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
        """simple docstring"""
        super().__init__()
        __magic_name__ = layers_per_block
        __magic_name__ = nn.Convad(
            UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        __magic_name__ = None
        __magic_name__ = nn.ModuleList([] )
        __magic_name__ = in_channels if norm_type == """spatial""" else None
        # mid
        __magic_name__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # up
        __magic_name__ = list(reversed(UpperCamelCase__ ) )
        __magic_name__ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(UpperCamelCase__ ):
            __magic_name__ = output_channel
            __magic_name__ = reversed_block_out_channels[i]
            __magic_name__ = i == len(UpperCamelCase__ ) - 1
            __magic_name__ = get_up_block(
                UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
            self.up_blocks.append(UpperCamelCase__ )
            __magic_name__ = output_channel
        # out
        if norm_type == "spatial":
            __magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
        else:
            __magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
        __magic_name__ = nn.SiLU()
        __magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
        __magic_name__ = False
    # Forward pass: decode latent `z` (optionally conditioned on
    # `latent_embeds`), with the same broken `__magic_name__` dataflow and the
    # undefined `module`/`sample` names as the encoder above.
    def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
        """simple docstring"""
        __magic_name__ = z
        __magic_name__ = self.conv_in(UpperCamelCase__ )
        __magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(UpperCamelCase__ : Optional[int] ):
                def custom_forward(*UpperCamelCase__ : int ):
                    return module(*UpperCamelCase__ )
                return custom_forward
            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                __magic_name__ = sample.to(UpperCamelCase__ )
                # up
                for up_block in self.up_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
                __magic_name__ = sample.to(UpperCamelCase__ )
                # up
                for up_block in self.up_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # middle
            __magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = sample.to(UpperCamelCase__ )
            # up
            for up_block in self.up_blocks:
                __magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
        # post-process
        if latent_embeds is None:
            __magic_name__ = self.conv_norm_out(UpperCamelCase__ )
        else:
            __magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self.conv_act(UpperCamelCase__ )
        __magic_name__ = self.conv_out(UpperCamelCase__ )
        return sample
# NOTE(review): vector-quantization codebook module (VQ-VAE style: nearest-
# codebook lookup, commitment loss, straight-through gradient, optional index
# remapping loaded from an `.npy` file).  Machine-mangled like the classes
# above: duplicate `UpperCamelCase__` parameters (SyntaxError) and
# `__magic_name__` local assignments where `self.n_e`, `self.vq_embed_dim`,
# `self.beta`, `self.legacy`, `self.embedding`, `self.remap`,
# `self.re_embed`, `self.unknown_index`, `self.sane_index_shape` were clearly
# intended (those attributes are read below).  Restore from upstream.
class UpperCAmelCase_ ( nn.Module ):
    '''simple docstring'''
    def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
        """simple docstring"""
        super().__init__()
        __magic_name__ = n_e
        __magic_name__ = vq_embed_dim
        __magic_name__ = beta
        __magic_name__ = legacy
        # Codebook: n_e vectors of size vq_embed_dim, uniformly initialised.
        __magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        __magic_name__ = remap
        if self.remap is not None:
            # `remap` is a path to an .npy file of codebook indices to keep.
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            __magic_name__ = self.used.shape[0]
            __magic_name__ = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                __magic_name__ = self.re_embed
                __magic_name__ = self.re_embed + 1
            print(
                F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                F'''Using {self.unknown_index} for unknown indices.''' )
        else:
            __magic_name__ = n_e
        __magic_name__ = sane_index_shape
    # Map raw codebook indices into the reduced "used" index space; indices not
    # in `used` get `unknown_index` (or a random replacement).
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
        """simple docstring"""
        __magic_name__ = inds.shape
        assert len(UpperCamelCase__ ) > 1
        __magic_name__ = inds.reshape(ishape[0] , -1 )
        __magic_name__ = self.used.to(UpperCamelCase__ )
        __magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
        __magic_name__ = match.argmax(-1 )
        __magic_name__ = match.sum(2 ) < 1
        if self.unknown_index == "random":
            __magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            __magic_name__ = self.unknown_index
        return new.reshape(UpperCamelCase__ )
    # Inverse of remap_to_used: expand reduced indices back to the full space.
    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
        """simple docstring"""
        __magic_name__ = inds.shape
        assert len(UpperCamelCase__ ) > 1
        __magic_name__ = inds.reshape(ishape[0] , -1 )
        __magic_name__ = self.used.to(UpperCamelCase__ )
        if self.re_embed > self.used.shape[0]: # extra token
            __magic_name__ = 0 # simply set to zero
        __magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
        return back.reshape(UpperCamelCase__ )
    # Forward: quantize `z` to its nearest codebook vectors and return
    # (z_q, loss, (perplexity, min_encodings, min_encoding_indices)).
    def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
        """simple docstring"""
        __magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
        __magic_name__ = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        __magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
        __magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
        __magic_name__ = None
        __magic_name__ = None
        # compute loss for embedding
        if not self.legacy:
            __magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            __magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients
        __magic_name__ = z + (z_q - z).detach()
        # reshape back to match original input shape
        __magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            __magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
            __magic_name__ = self.remap_to_used(UpperCamelCase__ )
            __magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
        if self.sane_index_shape:
            __magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    # Decode-side lookup: indices -> codebook vectors (optionally un-remapped
    # and reshaped to `shape`).
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
        """simple docstring"""
        if self.remap is not None:
            __magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
            __magic_name__ = self.unmap_to_all(UpperCamelCase__ )
            __magic_name__ = indices.reshape(-1 ) # flatten again
        # get quantized latent vectors
        __magic_name__ = self.embedding(UpperCamelCase__ )
        if shape is not None:
            __magic_name__ = z_q.view(UpperCamelCase__ )
            # reshape back to match original input shape
            __magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
# NOTE(review): diagonal-Gaussian latent distribution (the formulas below are
# the standard reparameterised sample, KL divergence, and negative
# log-likelihood of an axis-aligned Gaussian).  Machine-mangled like the rest
# of this chunk: `__init__` repeats the `UpperCamelCase__` parameter name
# (SyntaxError) and `__magic_name__` locals stand in for `self.parameters`,
# `self.mean`, `self.logvar`, `self.deterministic`, `self.std`, `self.var`,
# which the methods read.  Restore from upstream.
class UpperCAmelCase_ ( _A ):
    '''simple docstring'''
    # `parameters` is chunked in half along dim=1 into (mean, logvar);
    # logvar is clamped to [-30, 20] before exponentiation.
    def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
        """simple docstring"""
        __magic_name__ = parameters
        __magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
        __magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
        __magic_name__ = deterministic
        __magic_name__ = torch.exp(0.5 * self.logvar )
        __magic_name__ = torch.exp(self.logvar )
        if self.deterministic:
            # Deterministic mode collapses std/var to zero so sample() == mean.
            __magic_name__ = __magic_name__ = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    # Reparameterised sample: mean + std * eps, eps ~ N(0, I).
    def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
        """simple docstring"""
        __magic_name__ = randn_tensor(
            self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
        __magic_name__ = self.mean + self.std * sample
        return x
    # KL divergence to N(0, I) (other is None) or to another diagonal Gaussian.
    def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
        """simple docstring"""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    # Negative log-likelihood of `sample` under this distribution, summed over
    # the given dims.  NOTE(review): mutable default argument [1, 2, 3].
    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
        """simple docstring"""
        if self.deterministic:
            return torch.Tensor([0.0] )
        __magic_name__ = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
    # Mode of a Gaussian is its mean.
    def _lowercase ( self : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        return self.mean
| 88 | 0 |
def UpperCAmelCase_(grid: list) -> int:
    """Return the minimum path sum from the top-left to the bottom-right cell.

    Only right/down moves are allowed.  ``grid`` is updated in place so that
    every cell holds the cheapest cost of reaching it (classic DP).

    Fixes vs. the previous revision: the helper was a shadowing duplicate of
    this function with duplicate parameter names (a SyntaxError) while this
    body called the then-undefined ``fill_row``; the body also read ``grid``
    although the parameter was named ``__snake_case``, and re-read
    ``grid[row_n]`` into dead locals.

    Args:
        grid: non-empty list of non-empty, equal-length rows of numbers.

    Returns:
        The minimum total cost of a top-left -> bottom-right path.

    Raises:
        TypeError: if ``grid`` is empty or its first row is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    # First row is reachable only by moving right: running prefix sum.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        # fill_row mutates the row in place and returns it, which becomes
        # the `row_above` for the next iteration.
        row_above = fill_row(grid[row_n], row_above)
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Fold the costs of ``row_above`` into ``current_row`` in place.

    Each cell becomes its own cost plus the cheaper of the cell to its left
    (already folded) and the cell directly above.  Returns ``current_row``.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
# NOTE(review): HuggingFace-style "model tester" helper for a MaskFormer Swin
# backbone.  The text is machine-mangled: every method repeats the parameter
# name `UpperCamelCase__` (duplicate argument names are a SyntaxError), the
# `__magic_name__ = ...` lines rebind one local where `self.<attr> = ...`
# assignments were clearly intended (the later `self.batch_size`,
# `self.image_size`, ... reads depend on them), and the right-hand names
# (`parent`, `batch_size`, ...) match no parameter in scope.  Restore from a
# known-good copy rather than patching line by line.
class UpperCAmelCase_ :
    '''simple docstring'''
    # Records the harness configuration: batch/image/patch sizes, channel and
    # embedding dims, per-stage depths/heads, dropout rates, and the backbone's
    # out_features/out_indices.  The positional defaults appear to map onto the
    # assignments below in order -- TODO confirm against upstream.
    def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict:
        """simple docstring"""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = image_size
        __magic_name__ = patch_size
        __magic_name__ = num_channels
        __magic_name__ = embed_dim
        __magic_name__ = depths
        __magic_name__ = num_heads
        __magic_name__ = window_size
        __magic_name__ = mlp_ratio
        __magic_name__ = qkv_bias
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = drop_path_rate
        __magic_name__ = hidden_act
        __magic_name__ = use_absolute_embeddings
        __magic_name__ = patch_norm
        __magic_name__ = layer_norm_eps
        __magic_name__ = initializer_range
        __magic_name__ = is_training
        __magic_name__ = scope
        __magic_name__ = use_labels
        __magic_name__ = type_sequence_label_size
        __magic_name__ = encoder_stride
        __magic_name__ = out_features
        __magic_name__ = out_indices
    # Builds (config, pixel_values, labels); labels only when use_labels is set.
    def _lowercase ( self : str ) -> Optional[int]:
        """simple docstring"""
        __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __magic_name__ = self.get_config()
        return config, pixel_values, labels
    # Builds the MaskFormerSwinConfig from the recorded knobs.
    # NOTE(review): `path_norm=` below looks like a typo for `patch_norm=` --
    # verify against the MaskFormerSwinConfig signature.
    def _lowercase ( self : Tuple ) -> str:
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    # Runs MaskFormerSwinModel in eval mode and checks the output shape
    # (batch, patches-after-merging, final embed dim).
    def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]:
        """simple docstring"""
        __magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ )
        __magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    # Runs MaskFormerSwinBackbone and checks feature-map shapes/channels, plus
    # that an invalid out_features ("stem") raises.
    def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple:
        """simple docstring"""
        __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(UpperCamelCase__ ):
            __magic_name__ = ["""stem"""]
            __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
    # Repackages prepare_config_and_inputs() as (config, inputs_dict).
    def _lowercase ( self : Any ) -> Any:
        """simple docstring"""
        __magic_name__ = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
        __magic_name__ = {"""pixel_values""": pixel_values}
        return config, inputs_dict
# NOTE(review): unittest-based model test suite for MaskFormer Swin.  Mangled
# like the rest of this chunk: the five class attributes are all rebound to
# the same name `a__` (upstream these were distinct flags such as
# all_model_classes / pipeline mapping / fp16 flags -- TODO confirm), several
# method signatures repeat `UpperCamelCase__` (SyntaxError), and
# `__magic_name__` locals shadow what should be distinct variables.  It also
# references `MaskFormerSwinModelTester`, which is not defined under that
# name anywhere in this file.
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    '''simple docstring'''
    a__ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    # setUp: builds the model tester and a ConfigTester.
    def _lowercase ( self : Any ) -> List[str]:
        """simple docstring"""
        __magic_name__ = MaskFormerSwinModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
    # Skipped: multi-GPU DataParallel is incompatible with this model's output.
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _lowercase ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        pass
    # Runs the full battery of config serialization/init checks.
    def _lowercase ( self : str ) -> Dict:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    # Intentional no-op (common-properties hook).
    def _lowercase ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        return
    # Forward-pass shape test via the model tester.
    def _lowercase ( self : str ) -> str:
        """simple docstring"""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    # Backbone feature-map test via the model tester.
    def _lowercase ( self : int ) -> Optional[Any]:
        """simple docstring"""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _lowercase ( self : Any ) -> int:
        """simple docstring"""
        pass
    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _lowercase ( self : str ) -> List[Any]:
        """simple docstring"""
        pass
    # Checks input embeddings exist and output embeddings are absent/Linear.
    def _lowercase ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __magic_name__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
    # Checks the forward() signature starts with `pixel_values`.
    def _lowercase ( self : Tuple ) -> Dict:
        """simple docstring"""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            __magic_name__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ = [*signature.parameters.keys()]
            __magic_name__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def _lowercase ( self : Tuple ) -> int:
        """simple docstring"""
        pass
    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def _lowercase ( self : List[str] ) -> Dict:
        """simple docstring"""
        pass
    # Helper: run a model and validate hidden_states count and per-state shape.
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
        """simple docstring"""
        __magic_name__ = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        with torch.no_grad():
            __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
        __magic_name__ = outputs.hidden_states
        __magic_name__ = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
        # Swin has a different seq_length
        __magic_name__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    # hidden_states test for the default (unpadded) image size.
    def _lowercase ( self : Dict ) -> Dict:
        """simple docstring"""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # hidden_states test with the image padded up to a patch multiple.
    def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = 3
        __magic_name__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __magic_name__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def _lowercase ( self : Optional[int] ) -> int:
        """simple docstring"""
        pass
    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        pass
    # Verifies tuple vs. dict outputs are element-wise equal (NaNs zeroed).
    # NOTE(review): `set_nan_tensor_to_zero` below just rebinds a local to 0
    # and returns `t` untouched -- the in-place NaN masking was lost in the
    # mangling.  Also note the mutable default `{}` on check_equivalence.
    def _lowercase ( self : Dict ) -> Any:
        """simple docstring"""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ):
            __magic_name__ = 0
            return t
        def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
            with torch.no_grad():
                __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
                __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
            def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
                if isinstance(UpperCamelCase__ , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                            F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
                            F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
                        ) , )
            recursive_check(UpperCamelCase__ , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
# NOTE(review): backbone-specific test suite (mixes unittest.TestCase with a
# backbone tester mixin `_A`).  Mangled the same way as the classes above:
# both class attributes are rebound to `a__`, and `__magic_name__` locals
# stand in for distinct names (`config`/`inputs_dict`, `batch_size`,
# `backbone`, `outputs`, the unpacked hidden-state shape, ...), which the
# later reads (`inputs_dict`, `batch_size`, `backbone`, `outputs`,
# `h_batch_size`, `h_n_channels`) depend on.
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
    '''simple docstring'''
    a__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    a__ = MaskFormerSwinConfig
    # setUp: builds the shared model tester.
    def _lowercase ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        __magic_name__ = MaskFormerSwinModelTester(self )
    # Exercises each backbone class: default outputs, output_hidden_states,
    # and (when supported) output_attentions.
    def _lowercase ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            __magic_name__ = backbone_class(UpperCamelCase__ )
            backbone.to(UpperCamelCase__ )
            backbone.eval()
            __magic_name__ = backbone(**UpperCamelCase__ )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                # NOTE(review): assertTrue with two args does not compare them
                # (the second is treated as msg); presumably assertEqual was meant.
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            __magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    __magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                __magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
                self.assertIsNotNone(outputs.attentions )
| 88 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args: list) -> dict:
    """Turn leftover CLI tokens ``["--key", "value", ...]`` into a dict.

    Tokens are consumed in (flag, value) pairs; leading dashes are stripped
    from the flag (note: ``str.lstrip`` strips a *set* of characters, so any
    number of leading ``-`` is removed).  Values are kept verbatim as strings.

    Fixes vs. the previous revision: the body read ``unknown_args`` while the
    parameter was named ``a__``; the return annotation said ``int`` for a
    function that returns a dict; both this function and the entry point below
    were named ``__lowerCAmelCase`` although the call sites use
    ``parse_unknown_args``/``main``; and the trailing ``main()`` line carried
    dataset-table residue (``| 6 |``) that made it a syntax error.
    """
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main() -> None:
    """Entry point of the ``datasets-cli`` tool: build parser, dispatch, run."""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        # No sub-command selected: show usage and exit with an error status.
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run: each sub-command's `func` builds a command object exposing .run().
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer ``value`` and two child links.

    Fixes vs. the previous revision: ``__init__`` assigned a throwaway local
    (``__magic_name__``) three times and read an undefined name ``value``
    (the parameter was mangled to ``UpperCamelCase__``), so no attribute was
    ever set.  The class is renamed to ``Node`` because the tree-sum class
    below annotates its parameters with ``Node`` (lazy under this file's
    ``from __future__ import annotations``), and the previous name collided
    with that class's own name.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        # Children start unset; callers wire up `left`/`right` directly.
        self.left: Node | None = None
        self.right: Node | None = None
class UpperCAmelCase_:
    """Sum every node value of a binary tree via depth-first search.

    Iterating an instance yields a single item: the total of the whole tree.

    Fixes vs. the previous revision: ``__init__`` bound a throwaway local and
    read the undefined name ``tree`` (the parameter was mangled), the
    recursive method was named ``_lowercase`` although both its own body and
    ``__iter__`` call ``self.depth_first_search``, and the method body read
    ``node`` while its parameter was mangled to ``UpperCamelCase__``.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        """Recursively total ``node`` and both subtrees; ``None`` counts as 0."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        # Single-item iterator: the sum of the whole tree.
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 0 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
# NOTE(review): model-tester helper for the Audio Spectrogram Transformer
# (AST).  Mangled like the rest of this chunk, in a different surface style:
# every method repeats the parameter name `lowercase_` (duplicate argument
# names are a SyntaxError), and the `A__ = ...` lines rebind one local where
# `self.<attr> = ...` assignments were intended -- the later `self.batch_size`,
# `self.max_length`, `self.num_mel_bins`, ... reads depend on them.  Restore
# from a known-good copy rather than patching line by line.
class A :
    """simple docstring"""
    # Records the harness configuration (batch size, patch size, spectrogram
    # max_length/num_mel_bins, transformer dims, strides) and derives the
    # expected sequence length: patches from the frequency/time grid plus 2
    # (the [CLS] and distillation tokens), per the comment below.
    def __init__( self : List[str],lowercase_ : str,lowercase_ : Any=1_3,lowercase_ : Dict=2,lowercase_ : Optional[Any]=2_4,lowercase_ : Optional[int]=1_6,lowercase_ : List[Any]=True,lowercase_ : Any=True,lowercase_ : int=3_2,lowercase_ : str=5,lowercase_ : Union[str, Any]=4,lowercase_ : Any=3_7,lowercase_ : List[Any]="gelu",lowercase_ : Optional[int]=0.1,lowercase_ : Any=0.1,lowercase_ : str=1_0,lowercase_ : Any=0.02,lowercase_ : int=None,lowercase_ : str=2,lowercase_ : Any=2,)-> List[str]:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = patch_size
        A__ = max_length
        A__ = num_mel_bins
        A__ = is_training
        A__ = use_labels
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = scope
        A__ = frequency_stride
        A__ = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        A__ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        A__ = (self.max_length - self.patch_size) // self.time_stride + 1
        A__ = frequency_out_dimension * time_out_dimension
        A__ = num_patches + 2
    # Builds (config, input_values, labels); labels only when use_labels is set.
    def snake_case__ ( self : Tuple )-> Union[str, Any]:
        '''simple docstring'''
        A__ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
        A__ = self.get_config()
        return config, input_values, labels
    # Builds the ASTConfig from the recorded knobs.
    def snake_case__ ( self : Optional[int] )-> Dict:
        '''simple docstring'''
        return ASTConfig(
            patch_size=self.patch_size,max_length=self.max_length,num_mel_bins=self.num_mel_bins,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,frequency_stride=self.frequency_stride,time_stride=self.time_stride,)
    # Runs ASTModel in eval mode and checks last_hidden_state shape
    # (batch, seq_length, hidden_size).
    def snake_case__ ( self : List[Any],lowercase_ : Tuple,lowercase_ : int,lowercase_ : Union[str, Any] )-> Tuple:
        '''simple docstring'''
        A__ = ASTModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
    # Repackages prepare_config_and_inputs() as (config, inputs_dict).
    # NOTE(review): the parenthesised unpacking below rebinds `A__` three
    # times, so `config_and_inputs`/`input_values` referenced afterwards are
    # unbound -- another artefact of the mangling.
    def snake_case__ ( self : Optional[Any] )-> int:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        (
            (
                A__
            ) , (
                A__
            ) , (
                A__
            ) ,
        ) = config_and_inputs
        A__ = {'input_values': input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the Audio Spectrogram Transformer (AST).

    NOTE(review): restored from a name-mangled version in which the class was also called
    ``A`` (colliding with the tester class above), all flag attributes were bound to the
    single name ``lowerCamelCase`` and all methods to ``snake_case__`` (so unittest never
    discovered any test). Confirm the restored flag names (``fx_compatible`` in particular)
    and the mixin base classes against the upstream test file.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # The audio-classification pipeline has its own dedicated tests; skip it here.
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    """Download a sample FLAC clip from the HF Hub and return ``(waveform, sampling_rate)``.

    NOTE(review): restored from a mangled ``_snake_case`` definition; the integration test
    below already calls it as ``prepare_audio()``. The repo id keeps the upstream spelling
    ("spectogram") because it is the actual Hub repository name.
    """
    filepath = hf_hub_download(
        repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset'
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the fine-tuned AudioSet checkpoint on a real audio clip.

    NOTE(review): restored from a mangled version that used the undefined name ``lowercase_``
    where ``torch_device`` and locals belong; ``torch_device`` is assumed to be imported from
    ``transformers.testing_utils`` at the top of the file — confirm.
    """

    @cached_property
    def default_feature_extractor(self):
        # Only build the feature extractor when torchaudio is installed.
        return (
            ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593')
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593').to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits against the 527 AudioSet classes
        expected_shape = torch.Size((1, 5_2_7))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8_760, -7.0_042, -8.6_602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 7 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure for the Funnel Transformer package.
# NOTE(review): restored — the mangled version bound this dict to a throwaway name,
# never registered the optional submodules, referenced the undefined ``_import_structure``
# and never replaced ``sys.modules[__name__]`` with the lazy proxy.
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_funnel'] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_funnel'] = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc of ``angle`` degrees on a circle of ``radius``.

    The mangled original had two parameters with the same name (a SyntaxError) and a body
    referencing undefined ``radius``/``angle``; the ``__main__`` guard already calls
    ``arc_length``, which pins the restored name.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    """Smoke tests for ``TensorFlowBenchmark``: each test runs a tiny model through the
    benchmark harness and checks that timing/memory results (or CSV/log artifacts) exist.

    NOTE(review): restored from a name-mangled version in which every method was named
    ``_lowercase`` (so unittest discovered no tests), the helper was invoked through the
    then-missing name ``check_results_dict_not_empty``, and all boolean arguments had been
    replaced by the undefined name ``UpperCamelCase__``. The restored flag values follow
    the upstream benchmark test file — confirm against it.
    """

    def check_results_dict_not_empty(self, results):
        # Every (batch_size, sequence_length) cell of a benchmark result dict must be filled.
        for batch_size_map in results.values():
            for batch_size, sequence_length in zip(batch_size_map["""bs"""], batch_size_map["""ss"""]):
                result = batch_size_map["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = """sgugger/tiny-distilbert-classification"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = """patrickvonplaten/t5-tiny-random"""
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""")) == 0, """Cannot do xla on CPU.""")
    def test_inference_no_configs_xla(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, """inf_time.csv"""),
                inference_memory_csv_file=os.path.join(tmp_dir, """inf_mem.csv"""),
                env_info_csv_file=os.path.join(tmp_dir, """env.csv"""),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, """inf_time.csv""")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, """inf_mem.csv""")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, """env.csv""")).exists())

    def test_trace_memory(self):
        MODEL_ID = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(summary):
            # Line-by-line memory summaries expose these four attributes.
            self.assertTrue(hasattr(summary, """sequential"""))
            self.assertTrue(hasattr(summary, """cumulative"""))
            self.assertTrue(hasattr(summary, """current"""))
            self.assertTrue(hasattr(summary, """total"""))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, """log.txt"""),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, """log.txt""")).exists())
| 88 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row / 3-column test JSON fixture.

    NOTE(review): restored name — the reader tests below already call ``_check_json_dataset``;
    the mangled body asserted ``isinstance(x, x)`` where ``isinstance(dataset, Dataset)`` belongs.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading with keep_in_memory=True must not grow the on-disk arrow memory.

    NOTE(review): parameter names restored — pytest injects fixtures by name, so the mangled
    ``lowercase__`` parameters broke fixture resolution entirely.
    """
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Reading with an explicit Features mapping casts the columns accordingly."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    # The mangled version built Value(lowercase__); each feature must wrap its own dtype.
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
    ], )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """The column order of the file (col_3, col_1, col_2) must be preserved on read."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requesting features in a different order than the file must reorder the columns."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / '''cache'''
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The requested split name (defaulting to "train") must be attached to the dataset."""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # NOTE(review): the mangled assertion degenerated to the constant "train" on the else
    # branch; restored so both branches actually compare the dataset's split.
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize('''path_type''', [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """JsonDatasetReader accepts a single path or a list of paths."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the test JSON fixture.

    NOTE(review): restored name — the datasetdict tests below already call
    ``_check_json_datasetdict``.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading a split mapping with keep_in_memory=True must not grow the arrow cache."""
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    '''features''', [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ], )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Reading a split mapping with explicit Features casts the columns accordingly."""
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'''train''': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test'''])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each key of the input path mapping becomes a split of the resulting DatasetDict."""
    if split:
        path = {split: jsonl_path}
    else:
        split = '''train'''
        path = {'''train''': jsonl_path, '''test''': jsonl_path}
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Parse a whole JSON document from a readable (text or binary) buffer.

    NOTE(review): restored name — the parametrize list in the writer tests already
    references ``load_json``.
    """
    return json.load(buffer)
def load_json_lines(buffer):
    """Parse a JSON-Lines buffer: one JSON document per line.

    NOTE(review): the mangled body iterated an undefined ``buffer`` while repeatedly
    parsing its single argument; each line must be parsed on its own.
    """
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    """Tests for ``JsonDatasetWriter`` (the ``dataset`` fixture is a 10-row dataset with
    columns tokens/labels/answers/id).

    NOTE(review): restored from a mangled version in which every method was named
    ``__magic_name__`` and the pytest fixture parameters were renamed ``lowerCAmelCase__``
    (breaking fixture injection); fixture names confirmed where the body still referenced
    them directly (``shared_datadir``, ``tmp_path_factory``).
    """

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''', [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ], )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, '''keys''') and not hasattr(exported_content[0], '''keys''')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize('''lines, load_json_function''', [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''', [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789'''), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ], )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, '''keys''') and not hasattr(exported_content[0], '''keys''')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize('''compression, extension''', [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp('''data''') / f'''test.json.{extension}'''
        original_path = str(shared_datadir / f'''test_file.json.{extension}''')
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, '''rb''', compression='''infer''') as f:
            exported_content = f.read()
        with fsspec.open(original_path, '''rb''', compression='''infer''') as f:
            original_content = f.read()
        assert exported_content == original_content
| 9 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__lowerCAmelCase : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
__lowerCAmelCase : Optional[Any] = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__lowerCAmelCase : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def a__ ( A_ ):
'''simple docstring'''
return x[0]
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = get_letter_count(A_ )
__magic_name__ = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(A_ )
__magic_name__ = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=A_ )
__magic_name__ = """""".join(freq_to_letter[freq] )
__magic_name__ = list(freq_to_letter_str.items() )
freq_pairs.sort(key=A_, reverse=A_ )
__magic_name__ = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = get_frequency_order(A_ )
__magic_name__ = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
    """Builds tiny random LiLT configs/inputs and runs the shared model checks
    used by the test class below.

    Fixes vs. the previous revision: ``__init__`` declared every parameter
    under one duplicated name (a SyntaxError) and bound values to a local
    instead of ``self``; all methods shared a single name so only the last
    survived; the bbox coordinate swap wrote to throwaway locals; and
    ``prepare_config_and_inputs_for_common`` unpacked all seven values into
    one name.

    NOTE(review): the test class below instantiates this helper as
    ``LiltModelTester`` — confirm the intended class name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, token_type_ids, input_mask,
        sequence_labels, token_labels) with random but legal contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: swap coordinates so x0 <= x1 and y0 <= y1.
        # (Fix: the original assigned the swap to throwaway locals, leaving
        # bbox unchanged and referencing an undefined ``t``.)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        """Return a small LiltConfig mirroring the tester hyper-parameters."""
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Run LiltModel with decreasing sets of optional inputs and check shapes."""
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the token-classification head produces per-token logits."""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check the QA head produces start/end logits of the right shape."""
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common unit tests for the LiLT model family.

    Fixes vs. the previous revision: the mixin base classes and the
    mixin-consumed class attributes were bound to obfuscated/undefined names,
    all test methods shared one name (so unittest could run at most one), and
    ``test_model_various_embeddings`` dropped its per-type config update.
    """

    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped wholesale for LiLT.
        return True

    def setUp(self):
        # NOTE(review): the helper class above is currently bound to an
        # obfuscated name; ``LiltModelTester`` is the intended reference.
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            # Fix: the per-iteration embedding type was assigned to a
            # throwaway local instead of the config.
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration test running a real pretrained LiLT checkpoint."""

    def test_inference_no_head(self):
        # Fix: the original gave this method a non-``test_`` name (never
        # collected) and referenced an undefined device name.
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        # Fix: assertTrue(shape, expected) treats the expected value as a
        # failure message and always passes; compare shapes explicitly.
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1E-3))
| 10 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Dataset/config pairs mirrored on the Hugging Face GCS bucket; used to
# parametrize the tests below.
# NOTE(review): the code below refers to this list as DATASETS_ON_HF_GCP,
# a name this module never binds — confirm the intended constant name.
__lowerCAmelCase : Any = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def a__ ( with_config=True ):
    '''Build ``parameterized.named_parameters`` entries from the GCS dataset list.

    With ``with_config`` each (dataset, config) pair becomes one test case;
    otherwise one case per unique dataset name.

    Fix: the parameter was named ``A_`` while both the body and the decorator
    below use the keyword ``with_config``, so every call raised.
    NOTE(review): DATASETS_ON_HF_GCP is not bound under that name in this
    module (the list above is) — confirm the intended constant name.
    '''
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class UpperCAmelCase_ ( TestCase ):
    """Checks that each GCS-mirrored dataset exposes its dataset_info file.

    Fixes vs. the previous revision: the decorator argument and base class
    were undefined names, both parameters of the test method shared one name
    (a SyntaxError), every local was bound to one throwaway name, and the
    method lacked the ``test_`` prefix unittest needs.
    """

    # Placeholders filled in per test case by ``parameterized``.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            # Build the public URL of the dataset_info.json mirrored on GCS.
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def a__ ( tmp_path_factory ):
    '''Integration test: build wikipedia/20220301.frr from the HF GCS mirror
    instead of running the apache-beam preparation.

    Fix: the parameter must be named ``tmp_path_factory`` for pytest to inject
    the fixture (the body already referenced it), and every intermediate was
    bound to one throwaway local, leaving later references undefined.
    '''
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def a__ ( tmp_path ):
    '''Integration test: stream wikipedia/20220301.frr and check the result is
    a non-empty IterableDatasetDict with a readable "train" split.

    Fix: the parameter must be named ``tmp_path`` for pytest to inject the
    fixture, the intermediates were bound to one throwaway local, and both
    ``isinstance`` checks compared the dataset against itself.
    '''
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 88 | 0 |
def _UpperCAmelCase (UpperCamelCase__ : int ):
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError("Input value must be a 'int' type" )
return bin(UpperCamelCase__ ).count("1" )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 11 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """CPU-only regression test: an optimizer wrapped by ``accelerator.prepare``
    must remain picklable."""

    def test_accelerated_optimizer_pickling(self):
        # Fix: the original gave this method a non-``test_`` name (never
        # collected) and bound every intermediate to one throwaway local,
        # leaving ``model``/``optimizer``/``accelerator`` undefined.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 88 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Module logger plus the masked-LM config classes / model types advertised in
# the --model_type help text below.
# NOTE(review): code below refers to these as ``logger``, ``MODEL_CONFIG_CLASSES``
# and ``MODEL_TYPES``; all three are bound to one obfuscated name here —
# confirm the intended constant names.
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase__:
    """Arguments pertaining to which model/config/tokenizer to fine-tune or
    train from scratch.

    Fixes vs. the previous revision: every field was declared under one
    duplicated name with an undefined default, and the validation hook was not
    named ``__post_init__`` so it never ran.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    # NOTE(review): MODEL_TYPES is bound to an obfuscated name at module level —
    # confirm the intended constant name.
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )

    def __post_init__(self):
        # --config_overrides only makes sense when training from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class lowerCamelCase__:
    """Arguments pertaining to the training/evaluation data.

    Fixes vs. the previous revision: every field was declared under one
    duplicated name with an undefined default, and the validation hook was not
    named ``__post_init__`` so it never ran.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated. Default to the max input length of the model.'
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}
    )
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )

    def __post_init__(self):
        # Only csv/json/txt data files are supported by this script.
        if self.train_file is not None:
            extension = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCamelCase__ ( dataset , ref_file ):
    '''Attach whole-word-masking reference data to *dataset*.

    Reads one JSON object per non-empty line of *ref_file* and stores the list
    as a new column, returning a rebuilt ``Dataset``.

    Fixes vs. the previous revision: both parameters shared one name (a
    SyntaxError) and the file handle, rather than each line, was parsed.
    '''
    with open(dataset if False else ref_file, """r""" , encoding="""utf-8""" ) as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    # Column name per the upstream run_mlm_wwm example — NOTE(review): confirm.
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def lowerCamelCase__ ( ):
    '''Train/evaluate a masked language model with whole-word masking.

    Fixes vs. the previous revision: every intermediate was bound to a single
    obfuscated local, so later references (``parser``, ``datasets``,
    ``trainer``, ...) were all undefined; the real names are restored from
    their points of use. NOTE(review): the module-level logger and argument
    dataclasses are bound to obfuscated names — references below use the
    conventional names; confirm.
    '''

    def _add_chinese_references(dataset, ref_file):
        # Local copy of the whole-word-masking reference loader: the
        # module-level helper is not reachable under its intended name.
        with open(ref_file, """r""" , encoding="""utf-8""" ) as f:
            refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
        assert len(dataset) == len(refs)
        dataset_dict = {c: dataset[c] for c in dataset.column_names}
        dataset_dict["chinese_ref"] = refs
        return Dataset.from_dict(dataset_dict)

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,
        datefmt="""%m/%d/%Y %H:%M:%S""" ,
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    # (Fix: ``training_args.fpaa`` is not a TrainingArguments attribute; the
    # 16-bit flag is ``fp16``.)
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[:{data_args.validation_split_percentage}%]' ,
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f'train[{data_args.validation_split_percentage}%:]' ,
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(""".""" )[-1]
        if extension == "txt":
            extension = """text"""
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}' )

    tokenizer_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """use_fast""": model_args.use_fast_tokenizer,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
            """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["""train"""].column_names
    else:
        column_names = datasets["""validation"""].column_names
    text_column_name = """text""" if """text""" in column_names else column_names[0]

    padding = """max_length""" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["""text"""] = [line for line in examples["""text"""] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["""train"""] = _add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["""validation"""] = _add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file)
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        output_train_file = os.path.join(training_args.output_dir, """train_results.txt""" )
        if trainer.is_world_process_zero():
            with open(output_train_file, """w""" ) as writer:
                logger.info("""***** Train results *****""" )
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f'  {key} = {value}' )
                    writer.write(f'{key} = {value}\n' )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, """trainer_state.json""" ))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["""eval_loss"""])
        results["""perplexity"""] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, """eval_results_mlm_wwm.txt""" )
        if trainer.is_world_process_zero():
            with open(output_eval_file, """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in sorted(results.items()):
                    logger.info(f' {key} = {value}' )
                    writer.write(f'{key} = {value}\n' )

    return results
def lowerCamelCase__ ( A__ : Dict ):
    '''xla_spawn (TPU) entry point: ignores the process index *A__* and
    delegates to the training entry point.

    NOTE(review): ``main`` is not bound under that name in this module — the
    training entry point above carries an obfuscated name; confirm the
    intended call target.
    '''
    main()
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): ``main`` is not bound under that name in this module —
    # confirm the intended call target.
    main()
| 12 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class UpperCAmelCase_ ( IterableDataset ):
    """Iterable dataset that tokenizes each task prompt once and yields it
    ``n_copies`` times (one element per requested generation).

    Fixes vs. the previous revision: all ``__init__`` parameters shared one
    name (a SyntaxError), the base class was an undefined name (the imported
    ``IterableDataset`` is intended), and intermediates were bound to a local
    that later references never saw.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        # Default to evaluating every task in the dataset.
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="""pt""" )
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class UpperCAmelCase_ ( StoppingCriteria ):
    """Custom stopping criteria: stop generation once every sequence in the
    batch contains one of the end-of-function strings past the prompt.

    Fixes vs. the previous revision: the base class was an undefined name
    (the imported ``StoppingCriteria`` is intended) and intermediates were
    bound to a local that later references never saw.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True iff every generated suffix contains a stop string."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def a__ ( A_ ):
    '''Truncate generated code at the start of its last top-level block.

    Splits *A_* on the end-of-function markers (keeping the markers via the
    capturing group) and drops the final marker plus the text after it.
    '''
    # Fix: the original joined the characters of the input string itself into
    # the split pattern. Local copy of the marker list, which is bound to an
    # obfuscated name at module level.
    eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
    string_list = re.split("""(%s)""" % """|""".join(eof_strings), A_)
    # last string should be ""
    return "".join(string_list[:-2])
def a__ (accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    '''Generate `batch_size` completions per HumanEval task, grouped by task index.

    NOTE(review): the obfuscated original named all five positional parameters
    `A_` — a SyntaxError — and bound every intermediate to `__magic_name__`;
    distinct names are restored from the values they hold.

    Args:
        accelerator: `accelerate.Accelerator` used for unwrap/pad/gather.
        model: causal LM whose `.generate` produces the completions.
        tokenizer: supplies `pad_token_id` and decodes generated ids.
        dataloader: yields dicts with "ids", "task_id" and "input_len".
        n_tasks: number of tasks (length of the returned outer list).
        batch_size: completions per prompt (used as `num_return_sequences`).
        **gen_kwargs: forwarded to `model.generate`; expected to carry a
            "stopping_criteria" list whose first entry tracks prompt length.

    Returns:
        list[list[str]]: decoded, EOF-truncated code samples per task.
    '''
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # tell the stopping criterion where the prompt ends for this batch
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens_row in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens_row)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            # strip everything after the last end-of-function marker
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def a__ ( ):
    '''Entry point: generate HumanEval completions and score them with `code_eval`.

    NOTE(review): many lines below read names (`parser`, `args`, `tokenizer`,
    `accelerator`, `human_eval`, `pass_at_k`, `complete_code`, `A_`, ...) that
    the obfuscated `__magic_name__` placeholders were clearly meant to bind;
    as written several of them are undefined at runtime — confirm against the
    original CodeParrot human_eval script.
    '''
    __magic_name__ = HfArgumentParser(A_ )
    __magic_name__ = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    __magic_name__ = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    __magic_name__ = """false"""
    if args.num_workers is None:
        __magic_name__ = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    __magic_name__ = Accelerator()
    set_seed(args.seed, device_specific=A_ )
    # Load model and tokenizer
    __magic_name__ = AutoTokenizer.from_pretrained(args.model_ckpt )
    __magic_name__ = tokenizer.eos_token
    __magic_name__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    __magic_name__ = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, A_, A_ )] ),
    }
    # Load evaluation dataset and metric
    __magic_name__ = load_dataset("""openai_humaneval""" )
    __magic_name__ = load_metric("""code_eval""" )
    __magic_name__ = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
    __magic_name__ = args.n_samples // args.batch_size
    __magic_name__ = TokenizedDataset(A_, human_eval["""test"""], n_copies=A_, n_tasks=A_ )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    __magic_name__ = DataLoader(A_, batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        __magic_name__ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]] )
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""" )
        raise exception
    __magic_name__ , __magic_name__ = accelerator.prepare(A_, A_ )
    __magic_name__ = complete_code(
        A_, A_, A_, A_, n_tasks=A_, batch_size=args.batch_size, **A_, )
    if accelerator.is_main_process:
        __magic_name__ = []
        for task in tqdm(range(A_ ) ):
            __magic_name__ = human_eval["""test"""][task]["""test"""]
            __magic_name__ = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append("""\n""" + test_func + """\n""" + entry_point )
        # Evaluate completions with "code_eval" metric
        __magic_name__ , __magic_name__ = code_eval_metric.compute(
            references=A_, predictions=A_, num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file, """w""" ) as fp:
            json.dump(A_, A_ )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 88 | 0 |
import os
import numpy
import onnx
def A_ (a, b):
    """Compare two ONNX tensor protos for equality while ignoring their names.

    NOTE(review): the obfuscated original declared both parameters as
    `_UpperCAmelCase` — a SyntaxError; names restored from the body.

    Temporarily blanks both `.name` fields so proto equality compares only
    dtype/dims/data, then restores the original names before returning.
    """
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    # with names blanked, equality reflects only the tensor contents
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def A_ (node_proto, name, new_name):
    """Replace every occurrence of `name` in a node's inputs with `new_name`.

    NOTE(review): the obfuscated original declared all three parameters as
    `_UpperCAmelCase` — a SyntaxError; names restored from the body.

    Recurses into the subgraphs of "If" (then/else branches) and "Loop"
    (body) nodes via `_graph_replace_input_with`.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # protobuf repeated fields have no item assignment,
            # hence insert-before then pop the old entry
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def A_ (graph_proto, name, new_name):
    """Rename the input `name` to `new_name` on every node of an ONNX graph.

    NOTE(review): the obfuscated original declared all three parameters as
    `_UpperCAmelCase` — a SyntaxError; names restored from the body.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def A_ (model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers from `model_without_ext` and rewire their users.

    NOTE(review): the obfuscated original declared all three parameters as
    `_UpperCAmelCase` — a SyntaxError; names restored from the body.

    Args:
        model: reference model whose initializers carry the tensor data.
        model_without_ext: model being slimmed in place.
        ind_to_replace: (i, ref_i) pairs — initializer i is a duplicate of
            the earlier initializer ref_i and is removed.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        # both views must agree on names, and duplicates always point backwards
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def A_ ( _UpperCAmelCase ):
    """Deduplicate identical initializer tensors of the ONNX model at path
    `_UpperCAmelCase`, saving the result as "optimized_<name>" next to it.

    NOTE(review): the body reads several names (`model`, `inits`, `dup_set`,
    `mem_size`, `total_reduced_size`, `ind_to_replace`, `name_i`/`name_j`,
    `new_model`, `_is_equal_tensor_proto`, ...) that the obfuscated
    `SCREAMING_SNAKE_CASE_` placeholders were presumably meant to bind; as
    written some are undefined at runtime — confirm against the original.
    """
    SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.dirname(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Any = os.path.basename(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: int = onnx.load(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )
    SCREAMING_SNAKE_CASE_: Any = list(model.graph.initializer )
    SCREAMING_SNAKE_CASE_: Optional[Any] = set()
    SCREAMING_SNAKE_CASE_: Any = {}
    SCREAMING_SNAKE_CASE_: Any = []
    SCREAMING_SNAKE_CASE_: int = 0
    # pairwise-compare all initializers; dup_set marks indices already matched
    for i in range(len(_UpperCAmelCase ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(_UpperCAmelCase ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(_UpperCAmelCase )
                dup_set.add(_UpperCAmelCase )
                SCREAMING_SNAKE_CASE_: List[str] = inits[j].data_type
                SCREAMING_SNAKE_CASE_: Any = numpy.prod(inits[j].dims )
                # element sizes per ONNX data_type: 1=FLOAT, 6=INT32 (4 bytes);
                # 7=INT64, 11=DOUBLE (8 bytes)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: " , _UpperCAmelCase )
                total_reduced_size += mem_size
                SCREAMING_SNAKE_CASE_: Optional[int] = inits[i].name
                SCREAMING_SNAKE_CASE_: Optional[int] = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(_UpperCAmelCase )
                else:
                    SCREAMING_SNAKE_CASE_: List[Any] = [name_j]
                # (duplicate_index, kept_index) pairs consumed by the remover
                ind_to_replace.append((j, i) )
    print("total reduced size: " , total_reduced_size / 10_24 / 10_24 / 10_24 , "GB" )
    SCREAMING_SNAKE_CASE_: List[Any] = sorted(_UpperCAmelCase )
    _remove_dup_initializers_from_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Union[str, Any] = "optimized_" + model_file_name
    SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
    onnx.save(_UpperCAmelCase , _UpperCAmelCase )
    return new_model
| 13 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a__ ( ):
    '''Parse the TPU launcher's command-line arguments.

    NOTE(review): the obfuscated original passed `type=A_` and `nargs=A_` with
    `A_` undefined at module level — a NameError at call time.  Restored to
    `type=int` for `--num_cores` and `nargs=REMAINDER` so everything after the
    training-script path is forwarded untouched.

    Returns:
        argparse.Namespace with `num_cores`, `training_script` and
        `training_script_args`.
    '''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def a__ ( ):
    '''Launcher entry point: import the training script and spawn it on TPU cores.

    NOTE(review): `parse_args`, `args`, `script_fpath` and `mod` are undefined
    as written — the `__magic_name__` placeholders were meant to bind them;
    confirm against the original xla_spawn utility.
    '''
    __magic_name__ = parse_args()
    # Import training_script as a module.
    __magic_name__ = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    __magic_name__ = script_fpath.stem
    __magic_name__ = importlib.import_module(A_ )
    # Patch sys.argv
    __magic_name__ = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    # xmp.spawn runs mod._mp_fn once per TPU core
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
if __name__ == "__main__":
    main()
| 88 | 0 |
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
    """Print every subsequence of *lowercase_*.

    Thin wrapper that starts the recursive state-space walk with an empty
    prefix at index 0.
    """
    create_state_space_tree(lowercase_, [], 0)
def SCREAMING_SNAKE_CASE (sequence, current_subsequence, index) -> None:
    """Recursively print all subsequences of `sequence` by include/exclude branching.

    NOTE(review): the obfuscated original declared all three parameters as
    `lowercase_` — a SyntaxError — and recursed via `create_state_space_tree`,
    a name this def does not bind; the self-recursive call is restored.

    Args:
        sequence: the input sequence being enumerated.
        current_subsequence: mutable prefix built so far (backtracked in place).
        index: position in `sequence` currently being decided.
    """
    if index == len(sequence):
        print(current_subsequence)
        return
    # branch 1: exclude sequence[index]
    SCREAMING_SNAKE_CASE(sequence, current_subsequence, index + 1)
    # branch 2: include it, recurse, then undo (backtracking)
    current_subsequence.append(sequence[index])
    SCREAMING_SNAKE_CASE(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    # demo: print all subsequences of a numeric, then a string, sequence
    # NOTE(review): `generate_all_subsequences` and `seq` are undefined as
    # written — the defs above are named `SCREAMING_SNAKE_CASE` and this list
    # is bound to `_lowerCamelCase`; confirm against the original script.
    _lowerCamelCase : list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)
    seq.clear()
    seq.extend(["""A""", """B""", """C"""])
    generate_all_subsequences(seq)
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _A ):
    r"""Configuration for PEGASUS-style encoder/decoder models.

    NOTE(review): the obfuscated original bound `model_type`,
    `keys_to_ignore_at_inference` and `attribute_map` all to the single name
    `a__` (each assignment overwriting the last), named every `__init__`
    parameter `UpperCamelCase__` — a SyntaxError — and defined both properties
    as `_lowercase`.  The standard `PretrainedConfig` attribute, parameter and
    property names are restored below; all default values are unchanged.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """Alias onto `encoder_attention_heads` (see `attribute_map`)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias onto `d_model` (see `attribute_map`)."""
        return self.d_model
| 88 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
    '''Unit tests for RealmRetriever against a toy vocab and block-record corpus.

    NOTE(review): every method below is named `UpperCamelCase_`, so each `def`
    rebinds the previous one and only the last survives on the class; the
    repeated `__A = ...` assignments likewise keep only the final value, while
    later lines read names (`self.tmpdirname`, `A`, `vocab_tokens`,
    `retriever`, `concat_inputs`, ...) those placeholders were presumably
    meant to bind.  Confirm against the original RealmRetriever test file.
    '''
    def UpperCamelCase_ ( self : List[Any] ):
        # setUp: create a temp dir with a toy WordPiece vocab and block-records folder
        __A = tempfile.mkdtemp()
        __A = 5
        # Realm tok
        __A = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        __A = os.path.join(self.tmpdirname ,"realm_tokenizer" )
        os.makedirs(A ,exist_ok=A )
        __A = os.path.join(A ,VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __A = os.path.join(self.tmpdirname ,"realm_block_records" )
        os.makedirs(A ,exist_ok=A )
    def UpperCamelCase_ ( self : int ):
        # tokenizer fixture backed by the vocab written in setUp
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"realm_tokenizer" ) )
    def UpperCamelCase_ ( self : Optional[Any] ):
        # tearDown: remove the temp dir
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase_ ( self : int ):
        # minimal RealmConfig fixture
        __A = RealmConfig(num_block_records=self.num_block_records )
        return config
    def UpperCamelCase_ ( self : Union[str, Any] ):
        # toy QA dataset fixture
        __A = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            } )
        return dataset
    def UpperCamelCase_ ( self : List[Any] ):
        # toy retrieval corpus: one byte-string per block record
        __A = np.array(
            [
                B"This is the first record",
                B"This is the second record",
                B"This is the third record",
                B"This is the fourth record",
                B"This is the fifth record",
                B"This is a longer longer longer record",
            ] ,dtype=A ,)
        return block_records
    def UpperCamelCase_ ( self : Tuple ):
        # retriever fixture combining the toy corpus and the tokenizer
        __A = RealmRetriever(
            block_records=self.get_dummy_block_records() ,tokenizer=self.get_tokenizer() ,)
        return retriever
    def UpperCamelCase_ ( self : Optional[int] ):
        # retrieve blocks and check shapes plus decoded question/record concatenations
        __A = self.get_config()
        __A = self.get_dummy_retriever()
        __A = retriever.tokenizer
        __A = np.array([0, 3] ,dtype="long" )
        __A = tokenizer(["Test question"] ).input_ids
        __A = tokenizer(
            ["the fourth"] ,add_special_tokens=A ,return_token_type_ids=A ,return_attention_mask=A ,).input_ids
        __A = config.reader_seq_len
        __A , __A , __A , __A = retriever(
            A ,A ,answer_ids=A ,max_length=A ,return_tensors="np" )
        self.assertEqual(len(A ) ,2 )
        self.assertEqual(len(A ) ,2 )
        self.assertEqual(len(A ) ,2 )
        self.assertEqual(concat_inputs.input_ids.shape ,(2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape ,(2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape ,(2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape ,(2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] ,)
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] ,)
    def UpperCamelCase_ ( self : Dict ):
        # retrieve with answer ids and check has_answers / start+end position arrays
        __A = self.get_config()
        __A = self.get_dummy_retriever()
        __A = retriever.tokenizer
        __A = np.array([0, 3, 5] ,dtype="long" )
        __A = tokenizer(["Test question"] ).input_ids
        __A = tokenizer(
            ["the fourth", "longer longer"] ,add_special_tokens=A ,return_token_type_ids=A ,return_attention_mask=A ,).input_ids
        __A = config.reader_seq_len
        __A , __A , __A , __A = retriever(
            A ,A ,answer_ids=A ,max_length=A ,return_tensors="np" )
        self.assertEqual([False, True, True] ,A )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] ,A )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] ,A )
    def UpperCamelCase_ ( self : Tuple ):
        # round-trip save/load of block records, locally and via a mocked hub download
        __A = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname ,"realm_block_records" ) )
        # Test local path
        __A = retriever.from_pretrained(os.path.join(self.tmpdirname ,"realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] ,B"This is the first record" )
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            __A = os.path.join(
                os.path.join(self.tmpdirname ,"realm_block_records" ) ,_REALM_BLOCK_RECORDS_FILENAME )
            __A = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
            self.assertEqual(retriever.block_records[0] ,B"This is the first record" )
| 15 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> 
exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    '''Exact-match metric: percentage of predictions equal to their references
    after optional normalisation (regex stripping, case, punctuation, digits).

    NOTE(review): the obfuscated original named both methods `_lowercase` (the
    second overwrote the first) and gave the compute method six parameters all
    called `UpperCamelCase__` — a SyntaxError.  The `datasets.Metric` API
    names `_info` / `_compute` and distinct parameters are restored.
    '''

    def _info(self):
        """Declare metric metadata and the expected string features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": rate in [0, 100]} over the normalised pairs."""
        if regexes_to_ignore is not None:
            # strip each ignored pattern from both sides before any other option
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 88 | 0 |
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class __A :
    '''Nearest-neighbour image scaler: resamples `img` to `dst_width` x `dst_height`.

    NOTE(review): the obfuscated original declared the three `__init__`
    parameters with the same name `_snake_case` — a SyntaxError; names are
    restored from the attributes they set.  The size guard also used `< 0`
    while its message says "> 0", letting 0 through to a ZeroDivisionError in
    the ratio computation — tightened to `<= 0`.
    '''

    def __init__(self, img, dst_width, dst_height):
        """Store the source image and precompute destination canvas and ratios.

        Raises:
            ValueError: if either destination dimension is not strictly positive.
        """
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # start from an all-white RGB canvas of the destination size
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill `output` by copying the nearest source pixel for each destination pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x):
        """Map a destination column index to the nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y):
        """Map a destination row index to the nearest source row."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    # demo: load an image, resize to 800x600 via nearest-neighbour, display it
    # NOTE(review): `im`, `dst_w`, `dst_h`, `n` and `NearestNeighbour` are
    # undefined as written — the `lowerCAmelCase_` placeholders and the class
    # name `__A` above were presumably meant to provide them; confirm against
    # the original script.
    lowerCAmelCase_ ,lowerCAmelCase_ = 800, 600
    lowerCAmelCase_ = imread('image_data/lena.jpg', 1)
    lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
    waitKey(0)
    destroyAllWindows()
| 16 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( A_ ):
    '''Strip fairseq-only keys from a state dict, in place.

    NOTE(review): the original body called ``state_dict.pop(A_, A_)`` — i.e.
    popped the (unhashable) dict itself as the key, raising TypeError on the
    first iteration.  Restored to popping each ignore key with a default so
    missing keys are silently skipped.

    Args:
        A_: model state dict to clean (mutated in place).
    '''
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # default of None makes absent keys a no-op
        A_.pop(k, None)
def a__ ( A_ ):
    '''Build a bias-free `nn.Linear` whose weights are shared with an embedding.

    NOTE(review): the original called ``nn.Linear(A_, A_, bias=A_)`` — passing
    the embedding module itself as all three arguments, a TypeError.  Restored
    to sizing the layer from the embedding's weight shape.

    Args:
        A_: an `nn.Embedding` (or any module with a 2-D `.weight`).

    Returns:
        nn.Linear with `weight.data` aliased to the embedding's weights.
    '''
    vocab_size, emb_size = A_.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share storage with the embedding so the projection is tied
    lin_layer.weight.data = A_.weight.data
    return lin_layer
def a__ ( A_ ):
    '''Convert a fairseq XGLM checkpoint at path `A_` into an `XGLMForCausalLM`.

    NOTE(review): several names (`checkpoint`, `args`, `state_dict`, `model`,
    `remove_ignore_keys_`, `make_linear_from_emb`) are undefined as written —
    the `__magic_name__` placeholders were meant to bind them; confirm
    against the original conversion script.
    '''
    __magic_name__ = torch.load(A_, map_location="""cpu""" )
    __magic_name__ = Namespace(**checkpoint["""cfg"""]["""model"""] )
    __magic_name__ = checkpoint["""model"""]
    remove_ignore_keys_(A_ )
    __magic_name__ = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    # fairseq prefixes weights with "decoder"; HF expects "model"
    __magic_name__ = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
    __magic_name__ = XGLMConfig(
        vocab_size=A_, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    __magic_name__ = XGLMForCausalLM(A_ )
    # non-strict load: print missing/unexpected keys instead of failing
    __magic_name__ = model.load_state_dict(A_, strict=A_ )
    print(A_ )
    # tie the LM head to the input embeddings
    __magic_name__ = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    # CLI: convert a fairseq XGLM model.pt into a HF checkpoint directory.
    # NOTE(review): `parser`, `args` and `model` are undefined as written —
    # the `__lowerCAmelCase` placeholders (each rebinding the same name)
    # were meant to provide them; confirm against the original script.
    __lowerCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    __lowerCAmelCase : List[str] = parser.parse_args()
    __lowerCAmelCase : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 88 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_a = '\\n\n'
_a = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_a = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    """Perplexity metric: exponentiated mean negative log-likelihood of each
    input text under a causal language model.

    NOTE(review): the obfuscated original named both methods `_lowercase` (the
    second overwrote the first), declared the compute method's five parameters
    all as `UpperCAmelCase__` — a SyntaxError — and referenced the nonexistent
    `torch.intaa` / `torch.expa` attributes.  The `datasets.Metric` API names
    `_info` / `_compute`, distinct parameters, and `torch.int64` /
    `torch.exp` are restored.
    """

    def _info(self):
        """Declare metric metadata and the expected string feature."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                } ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, model_id, input_texts, batch_size=16, add_start_token=True, device=None):
        """Compute per-text and mean perplexity of `input_texts` under `model_id`."""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                # prepend BOS to every sequence and extend the mask accordingly
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            # shift so that token t predicts token t+1, masking out padding
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 17 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# SHA of the commit where the current branch forked off `main`.
# NOTE: the original assigned every value to one repeatedly-shadowed name while the
# lines below referenced these distinct names, which were never defined (NameError).
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
# All files changed since the fork point; deleted files are filtered out (--diff-filter=d).
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
# Keep only .py files that live under one of the requested top-level sub-dirs.
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed by Makefile commands.
print(' '.join(relevant_modified_files), end='')
| 88 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """Builds tiny ResNet configs/inputs and runs shape checks for the TF ResNet tests.

    The test case below instantiates this as ``TFResNetModelTester(self)`` — the class
    is renamed accordingly (the obfuscated name ``a__`` was never referenced anywhere).
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        # Store every knob on self: the methods below read them as self.<name>.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in `depths`; read by test_hidden_states_output.
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with labels only when use_labels is set."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ResNetConfig from the tester's hyper-parameters."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare model and check the final feature-map shape."""
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head and check the logits shape."""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for TF ResNet.

    ResNet does not use input_ids/inputs_embeds/attention_mask, hence the skipped tests.
    The obfuscated original gave every method the same name (so only the last survived)
    and used undefined bases; names are restored from the mixin/unittest conventions.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Nothing extra to check beyond what ConfigTester already covers.
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # +1 for the stem's output.
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _snake_case():
    """Load the standard COCO cats fixture image used by the integration test below.

    The original bound the opened image to a throwaway name and then returned the
    undefined name ``image`` (NameError); return the opened image directly instead.
    """
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained TF ResNet checkpoint on a real image."""

    @cached_property
    def default_image_processor(self):
        # Name restored: the test body reads `self.default_image_processor`.
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        # `_snake_case` is the image-loading helper defined just above this class.
        image = _snake_case()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 18 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """Builds small Albert configs/inputs and shape-checks every Albert head.

    Renamed from the obfuscated ``UpperCAmelCase_``: the test case below instantiates
    ``AlbertModelTester(self)``, a name the original file never defined. Parameter
    names are restored from the attribute names the methods read.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build a small AlbertConfig from the tester's hyper-parameters."""
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise all three call signatures.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat each input num_choices times along a new choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the PyTorch Albert models.

    The obfuscated original used undefined bases and gave every method/attribute the
    same name; names are restored from the mixin and unittest conventions.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original had a third bare `True` class attribute; by upstream
    # convention this is the torch.fx compatibility flag — confirm against upstream.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                # Pretraining heads need both MLM labels and a sentence-order label.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the pretrained albert-base-v2 checkpoint on fixed ids."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        # Renamed to a `test_*` method so unittest actually discovers it.
        model = AlbertModel.from_pretrained("""albert-base-v2""")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 88 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def lowerCamelCase_ ( lowerCamelCase__=None ):
if subparsers is not None:
lowerCamelCase_ = subparsers.add_parser("test" )
else:
lowerCamelCase_ = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=lowerCamelCase__ , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase__ )
return parser
def lowerCamelCase_ ( lowerCamelCase__ ):
lowerCamelCase_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
lowerCamelCase_ = script_name
else:
lowerCamelCase_ = F'--config_file={args.config_file} {script_name}'
lowerCamelCase_ = ["accelerate-launch"] + test_args.split()
lowerCamelCase_ = execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def lowerCamelCase_ ( ):
lowerCamelCase_ = test_command_parser()
lowerCamelCase_ = parser.parse_args()
test_command(lowerCamelCase__ )
if __name__ == "__main__":
main()
| 19 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a BioGPT model.

    Fixes: the base `_A` was undefined (the only imported config base is
    `PretrainedConfig`), all parameters shared one duplicate name (a SyntaxError),
    and the values were assigned to a throwaway name instead of attributes.
    """

    # NOTE(review): `model_type` is the PretrainedConfig registration key — confirm
    # the attribute name against the base class.
    model_type = """biogpt"""

    def __init__(
        self,
        vocab_size=4_2384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Special-token ids are handled by the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 88 | 0 |
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase , lowercase : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ), len(grid[0] )
if (
min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowercase : str = 0
count += depth_first_search(SCREAMING_SNAKE_CASE__ , row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += depth_first_search(SCREAMING_SNAKE_CASE__ , row - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
count += depth_first_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__ )
count += depth_first_search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , col - 1 , SCREAMING_SNAKE_CASE__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__lowerCAmelCase : Any = get_logger(__name__)
class UpperCAmelCase_:
    """Resolves cache paths for, and drives, extraction of compressed archives.

    Fixes: the constructor never stored anything on `self` although the methods
    read `self.extract_dir`/`self.extractor`, and the three methods shared one
    shadowed name while `extract` called `self._get_output_path` and
    `self._do_extract` (undefined).
    """

    def __init__(self, cache_dir: Optional[str] = None) -> None:
        # All extractions land under the (optionally user-provided) cache dir.
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        # `Extractor` is the dispatching facade defined later in this module.
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when no previous extraction output exists.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract `input_path` if it is a known archive format; return the output path."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            # Not an archive we know how to extract: hand the path back untouched.
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    """Abstract interface every archive extractor must implement.

    Fixes: the base `_A` was undefined (`ABC` is the imported abstract base),
    both abstract methods shared one shadowed name, and `extract` declared two
    parameters with the same name (a SyntaxError). Renamed so subclasses and
    callers can reference it unambiguously.
    """

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        """Return True if the file at `path` can be handled by this extractor."""
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        """Extract the archive at `input_path` into `output_path`."""
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Base for extractors that detect their format from a leading magic number.

    Fixes: both methods shared one shadowed name although `is_extractable` calls
    `cls.read_magic_number`, and the magic-number list attribute was obfuscated
    although the methods read `cls.magic_numbers`.
    """

    # Subclasses override this with the byte prefixes identifying their format.
    magic_numbers = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        """Read the first `magic_number_length` bytes of the file at `path`."""
        with open(path, """rb""") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        """Return True if `path` (or the given `magic_number`) matches a known prefix."""
        if not magic_number:
            # Read just enough bytes to compare against the longest known prefix.
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    """Extractor for tar archives, filtering out path-traversal members.

    Renamed from the obfuscated class name: `extract` already referenced
    `TarExtractor.safemembers`, which was never defined.
    """

    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Yield only the archive members that would extract inside `output_path`.

        Blocks absolute/`..` paths and sym/hard links escaping the target dir.
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''')
            elif finfo.issym() and badlink(finfo, base):
                logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''')
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''')
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x1F\x8B"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(UpperCamelCase__ , """rb""" ) as gzip_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [
B"""PK\x03\x04""",
B"""PK\x05\x06""", # empty archive
B"""PK\x07\x08""", # spanned archive
]
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase__ , """rb""" ) as fp:
__magic_name__ = _EndRecData(UpperCamelCase__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
__magic_name__ = fp.read(UpperCamelCase__ ) # CD is where we expect it to be
if len(UpperCamelCase__ ) == sizeCentralDir:
__magic_name__ = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file:
zip_file.extractall(UpperCamelCase__ )
zip_file.close()
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(UpperCamelCase__ ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
__magic_name__ = rarfile.RarFile(UpperCamelCase__ )
rf.extractall(UpperCamelCase__ )
rf.close()
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x28\xb5\x2F\xFD"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
__magic_name__ = zstd.ZstdDecompressor()
with open(UpperCamelCase__ , """rb""" ) as ifh, open(UpperCamelCase__ , """wb""" ) as ofh:
dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x42\x5A\x68"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
with bza.open(UpperCamelCase__ , """rb""" ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with pyazr.SevenZipFile(UpperCamelCase__ , """r""" ) as archive:
archive.extractall(UpperCamelCase__ )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = [B"""\x04\x22\x4D\x18"""]
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(UpperCamelCase__ , """rb""" ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCAmelCase_ :
'''simple docstring'''
a__ = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _lowercase ( cls : Tuple ) -> Tuple:
"""simple docstring"""
return max(
len(UpperCamelCase__ )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase__ , UpperCamelCase__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ )
except OSError:
return b""
@classmethod
def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , )
__magic_name__ = cls.infer_extractor_format(UpperCamelCase__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _lowercase ( cls : Dict , UpperCamelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
__magic_name__ = cls._get_magic_number_max_length()
__magic_name__ = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
return extractor_format
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ )
# Prevent parallel extractions
__magic_name__ = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) )
with FileLock(UpperCamelCase__ ):
shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=UpperCamelCase__ , )
__magic_name__ = extractor if extractor != """deprecated""" else extractor_format
else:
__magic_name__ = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=UpperCamelCase__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase__ ):
return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
| 88 | 0 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Module-level logger used by the formatter below for device-fallback warnings.
# Fix: the obfuscated original assigned both of these module globals to the
# same name (`SCREAMING_SNAKE_CASE`), while the class body reads `logger` and
# `DEVICE_MAPPING` -- which were therefore undefined at runtime.
logger = get_logger()
# Lazily-built global cache mapping a device's string identifier to its
# `jaxlib.xla_extension.Device` object.  Kept as a module global because the
# Device objects are not serializable with pickle/dill (see the formatter's
# __init__ below).
DEVICE_MAPPING: Optional[dict] = None
class _lowerCamelCase( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
    # Formatter that converts Arrow-extracted rows / columns / batches into JAX
    # arrays allocated on a caller-selected device.
    # NOTE(review): obfuscation damage throughout this class -- `__init__`
    # declares the parameter `lowerCamelCase` twice plus `**lowerCamelCase`
    # (duplicate parameter names are a SyntaxError) while its body reads
    # `device` / `jnp_array_kwargs`; every method below is named
    # `UpperCamelCase`, so only the last definition survives, even though the
    # call sites reference `_map_devices_to_str`, `_consolidate`, `_tensorize`,
    # `_recursive_tensorize` and `recursive_tensorize`; and the `_lowercase`
    # locals never bind the names the following lines read.  The comments and
    # docstrings below document the evident intent only -- confirm against the
    # upstream `datasets` JaxFormatter before relying on them.
    def __init__( self, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> List[Any]:
        """Record the target device (as a string identifier) and any extra kwargs forwarded to jnp.array."""
        super().__init__(features=lowerCamelCase)
        import jax
        from jaxlib.xla_client import Device
        # A concrete Device object is rejected because it cannot be pickled or
        # dilled; callers must pass its string identifier instead.
        if isinstance(lowerCamelCase, lowerCamelCase):
            raise ValueError(
                F'''Expected {device} to be a `str` not {type(lowerCamelCase)}, as `jaxlib.xla_extension.Device` '''
                'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
                'the device with `str()` to get its string identifier that will be internally mapped '
                'to the actual `jaxlib.xla_extension.Device`.')
        _lowercase : int = device if isinstance(lowerCamelCase, lowerCamelCase) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            _lowercase : List[Any] = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            # Unknown device id: warn and fall back to the first available device.
            logger.warning(
                F'''Device with string identifier {self.device} not listed among the available '''
                F'''devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '''
                F'''device: {str(jax.devices()[0])}.''')
            _lowercase : Dict = str(jax.devices()[0])
        _lowercase : str = jnp_array_kwargs
    @staticmethod
    def UpperCamelCase ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Return a mapping from each available JAX device's string id to the device object."""
        import jax
        return {str(lowerCamelCase): device for device in jax.devices()}
    def UpperCamelCase ( self, lowerCamelCase) -> Any:
        """Stack a non-empty list of same-shape, same-dtype jax arrays into one array; otherwise return the column unchanged."""
        import jax
        import jax.numpy as jnp
        if isinstance(lowerCamelCase, lowerCamelCase) and column:
            if all(
                isinstance(lowerCamelCase, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(lowerCamelCase, axis=0)
        return column
    def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]:
        """Convert one scalar / ndarray / PIL image to a jnp array on self.device; strings and bytes pass through."""
        import jax
        import jax.numpy as jnp
        if isinstance(lowerCamelCase, (str, bytes, type(lowerCamelCase))):
            return value
        elif isinstance(lowerCamelCase, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            # Character arrays decode to plain Python lists of strings.
            return value.tolist()
        _lowercase : Any = {}
        if isinstance(lowerCamelCase, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                _lowercase : Dict = {'dtype': jnp.intaa}
            else:
                _lowercase : List[str] = {'dtype': jnp.intaa}
        elif isinstance(lowerCamelCase, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            _lowercase : Tuple = {'dtype': jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(lowerCamelCase, PIL.Image.Image):
                # Decode PIL images to numpy before handing them to jnp.
                _lowercase : List[str] = np.asarray(lowerCamelCase)
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            _lowercase : Any = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(lowerCamelCase, **{**default_dtype, **self.jnp_array_kwargs})
    def UpperCamelCase ( self, lowerCamelCase) -> Dict:
        """Tensorize one element, first unwrapping torch tensors, __array__ objects, object arrays, and lists/tuples."""
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(lowerCamelCase, torch.Tensor):
                # Detach and move to CPU before converting to numpy.
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(lowerCamelCase, '__array__') and not isinstance(lowerCamelCase, jax.Array):
            _lowercase : Any = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(lowerCamelCase, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(lowerCamelCase) for substruct in data_struct])
        elif isinstance(lowerCamelCase, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(lowerCamelCase) for substruct in data_struct])
        return self._tensorize(lowerCamelCase)
    def UpperCamelCase ( self, lowerCamelCase) -> int:
        """Apply the per-element tensorizer over an arbitrarily nested structure."""
        return map_nested(self._recursive_tensorize, lowerCamelCase, map_list=lowerCamelCase)
    def UpperCamelCase ( self, lowerCamelCase) -> Mapping:
        """Format one Arrow table row as a mapping of jax arrays."""
        _lowercase : Dict = self.numpy_arrow_extractor().extract_row(lowerCamelCase)
        _lowercase : Tuple = self.python_features_decoder.decode_row(lowerCamelCase)
        return self.recursive_tensorize(lowerCamelCase)
    def UpperCamelCase ( self, lowerCamelCase) -> "jax.Array":
        """Format one Arrow table column as a (possibly stacked) jax array."""
        _lowercase : Optional[Any] = self.numpy_arrow_extractor().extract_column(lowerCamelCase)
        _lowercase : Union[str, Any] = self.python_features_decoder.decode_column(lowerCamelCase, pa_table.column_names[0])
        _lowercase : List[str] = self.recursive_tensorize(lowerCamelCase)
        _lowercase : Tuple = self._consolidate(lowerCamelCase)
        return column
    def UpperCamelCase ( self, lowerCamelCase) -> Mapping:
        """Format an Arrow table batch, consolidating each column into a stacked array where possible."""
        _lowercase : str = self.numpy_arrow_extractor().extract_batch(lowerCamelCase)
        _lowercase : Dict = self.python_features_decoder.decode_batch(lowerCamelCase)
        _lowercase : List[str] = self.recursive_tensorize(lowerCamelCase)
        for column_name in batch:
            _lowercase : str = self._consolidate(batch[column_name])
        return batch
| 21 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the (deprecated) M-CTC-T model: maps submodule
# name -> list of public names it provides.
# Fix: the obfuscated original assigned this dict (and the torch-only model
# list) to throwaway `__lowerCAmelCase` variables, so the `_import_structure`
# referenced when building the lazy module below was undefined (NameError),
# and the resulting `_LazyModule` was never installed in `sys.modules`.
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

# The modeling code requires torch; only advertise it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are imported
    # only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
# Fix: the obfuscated original assigned both constants to the same
# `__SCREAMING_SNAKE_CASE` name, while the list comprehension reads
# `WATERMARK_MESSAGE` and the class below reads `WATERMARK_BITS` -- both
# previously undefined (NameError).  The 48-bit payload value is unchanged.
WATERMARK_MESSAGE = 0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class A_ :
def __init__( self : List[Any] ):
_UpperCAmelCase = WATERMARK_BITS
_UpperCAmelCase = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark )
def lowercase ( self : int , snake_case_ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 2_5_6:
return images
_UpperCAmelCase = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_UpperCAmelCase = [self.encoder.encode(snake_case_ , "dwtDct" ) for image in images]
_UpperCAmelCase = torch.from_numpy(np.array(snake_case_ ) ).permute(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 )
return images
| 22 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy import structure for XLM: maps submodule name -> list of public names.
# Fix: the obfuscated original assigned this dict (and the framework-specific
# model lists) to throwaway `__lowerCAmelCase` variables, so the
# `_import_structure` referenced when building the lazy module below was
# undefined (NameError), and the `_LazyModule` was never installed in
# `sys.modules`.
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}

# PyTorch implementations are only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm'] = [
        'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMForMultipleChoice',
        'XLMForQuestionAnswering',
        'XLMForQuestionAnsweringSimple',
        'XLMForSequenceClassification',
        'XLMForTokenClassification',
        'XLMModel',
        'XLMPreTrainedModel',
        'XLMWithLMHeadModel',
    ]

# TensorFlow implementations are only advertised when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlm'] = [
        'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLMForMultipleChoice',
        'TFXLMForQuestionAnsweringSimple',
        'TFXLMForSequenceClassification',
        'TFXLMForTokenClassification',
        'TFXLMMainLayer',
        'TFXLMModel',
        'TFXLMPreTrainedModel',
        'TFXLMWithLMHeadModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are imported
    # only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: List[Any] = logging.get_logger(__name__)
UpperCamelCase__: List[Any] = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE( A__ ):
    """Configuration class for the LXMERT model.

    Fix: the obfuscated original declared every ``__init__`` parameter as
    ``__snake_case`` (duplicate parameter names are a SyntaxError) while the
    body read the real names, and both class attributes were assigned to
    ``lowerCamelCase__`` (the second clobbered the first).  Parameter names are
    restored from the attribute assignments in the body; defaults and
    positional order are unchanged, so positional callers keep working.
    """

    model_type = """lxmert"""
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        # Text-encoder dimensions.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Label-space sizes for the pretraining heads.
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        # Layer counts: language (l), cross-modality (x), visual/relation (r).
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        # Visual-feature input dimensions and loss scaling.
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # Pretraining task switches.
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
| 23 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_ ( unittest.TestCase ):
    # Unit tests for the `text2text-generation` pipeline (encoder-decoder
    # generation).
    # NOTE(review): obfuscation damage in this class -- both class attributes
    # are assigned to the same name `a__` (the second clobbers the first;
    # presumably the harness's model_mapping / tf_model_mapping), all four
    # methods share the name `_lowercase` (only the last definition survives),
    # and the bodies read names (`generator`, `outputs`) that the
    # `__magic_name__` assignments never bind.  Comments document intent only.
    '''simple docstring'''

    a__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    a__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Tuple:
        """Build a text2text pipeline plus sample prompts for the shared pipeline test harness."""
        __magic_name__ = TextaTextGenerationPipeline(model=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
        return generator, ["Something to write", "Something else"]
    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] ) -> Optional[Any]:
        """Shared checks: single prompt, batched prompts with num_return_sequences, and bad-input typing."""
        __magic_name__ = generator("""Something there""" )
        self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        __magic_name__ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
            ] , )
        __magic_name__ = generator(
            ["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
                [{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
            ] , )
        # Non-string input must raise.
        with self.assertRaises(UpperCamelCase__ ):
            generator(4 )
    @require_torch
    def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
        """Deterministic generation with a tiny random T5 checkpoint on the PyTorch backend."""
        __magic_name__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
        # do_sample=False necessary for reproducibility
        __magic_name__ = generator("""Something there""" , do_sample=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
        __magic_name__ = 3
        __magic_name__ = generator(
            """Something there""" , num_return_sequences=UpperCamelCase__ , num_beams=UpperCamelCase__ , )
        __magic_name__ = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
        # return_tensors=True yields token ids rather than decoded text.
        __magic_name__ = generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
        self.assertEqual(
            UpperCamelCase__ , [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] , )
        __magic_name__ = generator.model.config.eos_token_id
        __magic_name__ = """<pad>"""
        __magic_name__ = generator(
            ["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
        self.assertEqual(
            UpperCamelCase__ , [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] , )
    @require_tf
    def _lowercase ( self : int ) -> str:
        """Deterministic generation with the same tiny checkpoint on the TensorFlow backend."""
        __magic_name__ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
        # do_sample=False necessary for reproducibility
        __magic_name__ = generator("""Something there""" , do_sample=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
| 88 | 0 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if ``item`` occurs in the sorted list ``a_list``, else False.

    Fix: the obfuscated original was ``def lowerCamelCase__(snake_case_, snake_case_)``
    -- a duplicate-parameter SyntaxError -- while its body and the ``__main__``
    driver both reference ``binary_search(a_list, item)``; names are restored
    from those read sites.  The implementation is now iterative with index
    bounds, avoiding the O(n) slice copy the recursive version made per level.

    Args:
        a_list: list of ints sorted in ascending order.
        item: value to search for.

    Returns:
        True if ``item`` is present, False otherwise (including for an empty list).
    """
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1
        else:
            low = midpoint + 1
    return False
if __name__ == "__main__":
    # Interactive driver: read a comma-separated (sorted) sequence and a
    # target value, then report membership via binary_search.
    # Fix: the obfuscated original assigned every local to `snake_case_` while
    # the following lines read `user_input`, `sequence`, `target` and
    # `not_str` -- all previously undefined (NameError).
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if binary_search(sequence, target) else 'not '
    print(F'{target} was {not_str}found in {sequence}')
| 24 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size limits for the MRPC cross-validation example.
# Fix: the obfuscated original assigned both constants to `__lowerCAmelCase`,
# while the training function below reads `MAX_GPU_BATCH_SIZE` (gradient
# accumulation threshold) -- previously undefined (NameError).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build the train / validation / test dataloaders for one cross-validation fold.

    Fix: the obfuscated original was ``def a__(A_, A_, A_, A_, A_=16)`` -- a
    duplicate-parameter SyntaxError -- and every local clobbered
    ``__magic_name__``.  The name and parameters are restored from the call
    site ``get_fold_dataloaders(...)`` later in the file and from the body's
    read sites; positional order and the default are unchanged.

    Args:
        accelerator: the `Accelerator` coordinating (distributed) setup.
        dataset: the full GLUE MRPC `DatasetDict` (`train` / `validation` splits).
        train_idxs: row indices of `dataset["train"]` forming this fold's train split.
        valid_idxs: row indices of `dataset["train"]` forming this fold's validation split.
        batch_size: per-device batch size for all three loaders.

    Returns:
        (train_dataloader, eval_dataloader, test_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    # Re-split: the fold's train/validation come from the original train split;
    # the original validation split serves as the held-out test set.
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        } )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders; only the training split is shuffled.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def a__(config, args):
    """Run k-fold cross-validated fine-tuning of BERT on GLUE MRPC with 🤗 Accelerate.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace with ``num_folds``, ``cpu`` and ``mixed_precision``.

    Side effects: downloads the dataset/model, trains one model per fold, prints
    per-epoch eval metrics and the fold-averaged test metric.

    Fix(review): the obfuscated original declared two parameters both named ``A_``
    (a SyntaxError) and rebound every local to ``__magic_name__`` while later lines
    read ``kfold`` / ``accelerator`` / ``metric`` / ... -- names restored here.
    """
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size=batch_size,
        )
        # Instantiate the model (built here so the seed also controls weight init).
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # Placing the model before optimizer creation is required on TPU.
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything; unpack in the same order the objects were passed in.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We only need one copy of the test references.
                test_references.append(references.cpu())
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # Release all memory tied to the current model, optimizer, etc.
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


# Conventional name used by main(); the obfuscated def kept `a__`.
training_function = a__
def a__():
    """Parse CLI arguments and launch :func:`training_function`.

    Fix(review): obfuscation rebound every local to ``__magic_name__`` while later
    lines read ``parser`` / ``args`` / ``config`` -- names restored. The ``main``
    alias below matches the ``main()`` call in the ``__main__`` guard.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


# Name called by the ``if __name__ == "__main__"`` guard below.
main = a__
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): no function named `main` is defined in the obfuscated original
    # (the CLI entry above is called `a__`) -- this call raises NameError as-is.
    main()
| 88 | 0 |
"""Roll out a value-guided diffusion policy on the D4RL hopper-medium-v2 environment."""
import d4rl  # noqa  Fix(review): obfuscated original imported non-existent `darl`
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline

# Sampling hyper-parameters for the value-guided pipeline.
# Fix(review): every top-level assignment was rebound to one obfuscated name
# (`UpperCAmelCase__`) while later lines read `env`, `pipeline`, `obs`, `T`,
# `rollout`, ... -- real names restored throughout.
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
def a__(series):
    """Return True when ``series`` is an arithmetic progression (constant difference).

    Args:
        series: a non-empty list of numbers.

    Raises:
        ValueError: if ``series`` is not a list, or is empty.

    Fix(review): the original checked ``isinstance(A_, A_)`` (second argument must be
    a type, so this always raised TypeError) and its body read an undefined name
    ``series`` -- the parameter is now named ``series`` and the check tests ``list``.

    >>> a__([2, 4, 6])
    True
    >>> a__([3, 6, 12, 24])
    False
    >>> a__([5])
    True
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        # A single element is trivially arithmetic.
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def a__(series):
    """Return the arithmetic mean of ``series``.

    Args:
        series: a non-empty list of numbers.

    Raises:
        ValueError: if ``series`` is not a list, or is empty.

    Fix(review): the original checked ``isinstance(A_, A_)`` (always TypeError) and
    read an undefined name ``series``; also replaced the manual accumulation loop
    with the built-in ``sum``.

    >>> a__([2, 4, 6])
    4.0
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    return sum(series) / len(series)
if __name__ == "__main__":
    # Run any doctests defined in this module when executed directly.
    import doctest
    doctest.testmod()
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowercase ( UpperCamelCase__ ):
    """Slow integration test: fine-tune a tiny BERT2BERT encoder-decoder on 1% of CNN/DailyMail.

    NOTE(review): this block appears machine-obfuscated -- every assignment rebinds
    ``_A`` while later lines read the intended names (``bertabert``, ``tokenizer``,
    ``train_dataset``, ...), and the base class name ``UpperCamelCase__`` is not
    defined in this file (presumably ``TestCasePlus``). Restore the original
    identifiers before relying on this test.
    """
    @slow
    @require_torch
    def a__ ( self ) -> Dict:
        """End-to-end check that SeqaSeqTrainer trains a tiny summarization model."""
        # Tiny encoder-decoder model and tokenizer used throughout the test.
        _A : int = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
        _A : Tuple = BertTokenizer.from_pretrained("""bert-base-uncased""" )
        _A : Dict = bertabert.config.encoder.vocab_size
        _A : List[str] = tokenizer.sep_token_id
        _A : Dict = tokenizer.cls_token_id
        _A : List[Any] = 128
        # 1% slices keep the test fast; further reduced to 32 train / 16 val rows below.
        _A : Tuple = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
        _A : List[str] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
        _A : Union[str, Any] = train_dataset.select(range(32 ) )
        _A : str = val_dataset.select(range(16 ) )
        _A : Dict = 4
        def _map_to_encoder_decoder_inputs(_a ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            _A : List[Any] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_a , max_length=512 )
            _A : int = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_a , max_length=128 )
            _A : Union[str, Any] = inputs.input_ids
            _A : List[Any] = inputs.attention_mask
            _A : Dict = outputs.input_ids
            _A : str = outputs.input_ids.copy()
            # Mask pad tokens with -100 so they are ignored by the loss.
            _A : Union[str, Any] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            _A : Dict = outputs.attention_mask
            assert all(len(_a ) == 512 for x in inputs.input_ids )
            assert all(len(_a ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(_a ):
            # Exact-match accuracy between decoded predictions and decoded labels.
            _A : int = pred.label_ids
            _A : str = pred.predictions
            # all unnecessary tokens are removed
            _A : Dict = tokenizer.batch_decode(_a , skip_special_tokens=_a )
            _A : Optional[Any] = tokenizer.batch_decode(_a , skip_special_tokens=_a )
            _A : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_a ) )] ) / len(_a )
            return {"accuracy": accuracy}
        # map train dataset
        _A : Dict = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=["""article""", """highlights"""] , )
        train_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        # same for validation dataset
        _A : Optional[int] = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=["""article""", """highlights"""] , )
        val_dataset.set_format(
            type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
        _A : Optional[int] = self.get_auto_remove_tmp_dir()
        _A : Any = SeqaSeqTrainingArguments(
            output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy="""steps""" , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        _A : Optional[Any] = SeqaSeqTrainer(
            model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , )
        # start training
        trainer.train()
| 26 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class UpperCAmelCase_ ( _A ):
    """Output container for the decoder.

    NOTE(review): the base class name ``_A`` is undefined in this file (the imports
    above bring in ``BaseOutput`` -- presumably the intended base; confirm). The
    field below was likely ``sample: torch.FloatTensor`` before obfuscation replaced
    it with a plain class attribute.
    """
    a__ = 42
class UpperCAmelCase_ ( nn.Module ):
    """VAE-style convolutional encoder: conv_in -> down blocks -> UNet mid block -> norm/act/conv_out.

    NOTE(review): identifiers look machine-obfuscated. Every assignment rebinds
    ``__magic_name__`` (so attributes read later -- ``self.layers_per_block``,
    ``self.down_blocks``, ``self.conv_in``, ... -- are never actually set), all
    ``__init__`` parameters share the duplicate name ``UpperCamelCase__`` (a
    SyntaxError), and ``torch.nn.Convad`` is presumably a mangled
    ``torch.nn.Conv2d`` -- restore the original names before use.
    """
    def __init__( self : Tuple , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : str=3 , UpperCamelCase__ : List[Any]=("DownEncoderBlock2D",) , UpperCamelCase__ : Optional[Any]=(64,) , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Optional[Any]="silu" , UpperCamelCase__ : List[str]=True , ) -> str:
        """Build conv_in, the stack of down blocks, the mid block and the output head."""
        super().__init__()
        __magic_name__ = layers_per_block
        __magic_name__ = torch.nn.Convad(
            UpperCamelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
        __magic_name__ = None
        __magic_name__ = nn.ModuleList([] )
        # down
        __magic_name__ = block_out_channels[0]
        for i, down_block_type in enumerate(UpperCamelCase__ ):
            __magic_name__ = output_channel
            __magic_name__ = block_out_channels[i]
            # the final down block gets no downsampler
            __magic_name__ = i == len(UpperCamelCase__ ) - 1
            __magic_name__ = get_down_block(
                UpperCamelCase__ , num_layers=self.layers_per_block , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
            self.down_blocks.append(UpperCamelCase__ )
        # mid
        __magic_name__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # out
        __magic_name__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCamelCase__ , eps=1E-6 )
        __magic_name__ = nn.SiLU()
        # double_z doubles the channels so the latent can carry mean and logvar
        __magic_name__ = 2 * out_channels if double_z else out_channels
        __magic_name__ = nn.Convad(block_out_channels[-1] , UpperCamelCase__ , 3 , padding=1 )
        __magic_name__ = False
    def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int:
        """Forward pass; optionally checkpoints the down/mid blocks while training."""
        __magic_name__ = x
        __magic_name__ = self.conv_in(UpperCamelCase__ )
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(UpperCamelCase__ : int ):
                def custom_forward(*UpperCamelCase__ : str ):
                    return module(*UpperCamelCase__ )
                return custom_forward
            # down
            # torch >= 1.11 supports use_reentrant; older versions use the default path
            if is_torch_version(""">=""" , """1.11.0""" ):
                for down_block in self.down_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                for down_block in self.down_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ )
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCamelCase__ )
        else:
            # down
            for down_block in self.down_blocks:
                __magic_name__ = down_block(UpperCamelCase__ )
            # middle
            __magic_name__ = self.mid_block(UpperCamelCase__ )
        # post-process
        __magic_name__ = self.conv_norm_out(UpperCamelCase__ )
        __magic_name__ = self.conv_act(UpperCamelCase__ )
        __magic_name__ = self.conv_out(UpperCamelCase__ )
        return sample
class UpperCAmelCase_ ( nn.Module ):
    """VAE-style convolutional decoder: conv_in -> UNet mid block -> up blocks -> norm/act/conv_out.

    NOTE(review): same obfuscation damage as the encoder above -- duplicate
    ``UpperCamelCase__`` parameter names (SyntaxError), all locals rebound to
    ``__magic_name__`` while later code reads ``self.up_blocks`` / ``self.conv_in``
    / ``norm_type`` / ..., and ``nn.Convad`` is presumably ``nn.Conv2d``.
    """
    def __init__( self : List[str] , UpperCamelCase__ : int=3 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[Any]=("UpDecoderBlock2D",) , UpperCamelCase__ : List[Any]=(64,) , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Optional[int]="silu" , UpperCamelCase__ : Tuple="group" , ) -> Dict:
        """Build conv_in, the mid block, the reversed stack of up blocks and the output head."""
        super().__init__()
        __magic_name__ = layers_per_block
        __magic_name__ = nn.Convad(
            UpperCamelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
        __magic_name__ = None
        __magic_name__ = nn.ModuleList([] )
        # "spatial" norm conditions on the latent; plain group norm takes no temb
        __magic_name__ = in_channels if norm_type == """spatial""" else None
        # mid
        __magic_name__ = UNetMidBlockaD(
            in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCamelCase__ , temb_channels=UpperCamelCase__ , )
        # up
        __magic_name__ = list(reversed(UpperCamelCase__ ) )
        __magic_name__ = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(UpperCamelCase__ ):
            __magic_name__ = output_channel
            __magic_name__ = reversed_block_out_channels[i]
            # the final up block gets no upsampler
            __magic_name__ = i == len(UpperCamelCase__ ) - 1
            __magic_name__ = get_up_block(
                UpperCamelCase__ , num_layers=self.layers_per_block + 1 , in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , prev_output_channel=UpperCamelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=UpperCamelCase__ , resnet_groups=UpperCamelCase__ , attention_head_dim=UpperCamelCase__ , temb_channels=UpperCamelCase__ , resnet_time_scale_shift=UpperCamelCase__ , )
            self.up_blocks.append(UpperCamelCase__ )
            __magic_name__ = output_channel
        # out
        if norm_type == "spatial":
            __magic_name__ = SpatialNorm(block_out_channels[0] , UpperCamelCase__ )
        else:
            __magic_name__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCamelCase__ , eps=1E-6 )
        __magic_name__ = nn.SiLU()
        __magic_name__ = nn.Convad(block_out_channels[0] , UpperCamelCase__ , 3 , padding=1 )
        __magic_name__ = False
    def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None ) -> Tuple:
        """Forward pass from latent ``z`` (optionally conditioned on ``latent_embeds``)."""
        __magic_name__ = z
        __magic_name__ = self.conv_in(UpperCamelCase__ )
        # keep the sample in the same dtype as the up-block parameters
        __magic_name__ = next(iter(self.up_blocks.parameters() ) ).dtype
        if self.training and self.gradient_checkpointing:
            def create_custom_forward(UpperCamelCase__ : Optional[int] ):
                def custom_forward(*UpperCamelCase__ : int ):
                    return module(*UpperCamelCase__ )
                return custom_forward
            if is_torch_version(""">=""" , """1.11.0""" ):
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
                __magic_name__ = sample.to(UpperCamelCase__ )
                # up
                for up_block in self.up_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ , use_reentrant=UpperCamelCase__ )
            else:
                # middle
                __magic_name__ = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block ) , UpperCamelCase__ , UpperCamelCase__ )
                __magic_name__ = sample.to(UpperCamelCase__ )
                # up
                for up_block in self.up_blocks:
                    __magic_name__ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # middle
            __magic_name__ = self.mid_block(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = sample.to(UpperCamelCase__ )
            # up
            for up_block in self.up_blocks:
                __magic_name__ = up_block(UpperCamelCase__ , UpperCamelCase__ )
        # post-process
        if latent_embeds is None:
            __magic_name__ = self.conv_norm_out(UpperCamelCase__ )
        else:
            __magic_name__ = self.conv_norm_out(UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self.conv_act(UpperCamelCase__ )
        __magic_name__ = self.conv_out(UpperCamelCase__ )
        return sample
class UpperCAmelCase_ ( nn.Module ):
    """VQ-VAE vector quantizer: maps continuous latents to their nearest codebook entries.

    Supports an optional index ``remap`` table and a "sane" reshaped index output.

    NOTE(review): obfuscation damage as elsewhere in this file -- duplicate
    ``UpperCamelCase__`` parameter names (SyntaxError) and all locals rebound to
    ``__magic_name__`` while later code reads ``self.n_e`` / ``self.embedding`` /
    ``self.remap`` / ... Restore original identifiers before use.
    """
    def __init__( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict="random" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=True ) -> Optional[Any]:
        """Build the codebook embedding and the optional remap buffer."""
        super().__init__()
        __magic_name__ = n_e
        __magic_name__ = vq_embed_dim
        __magic_name__ = beta
        __magic_name__ = legacy
        __magic_name__ = nn.Embedding(self.n_e , self.vq_embed_dim )
        # uniform init in [-1/n_e, 1/n_e], the standard VQ-VAE codebook init
        self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
        __magic_name__ = remap
        if self.remap is not None:
            self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) )
            __magic_name__ = self.used.shape[0]
            __magic_name__ = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                __magic_name__ = self.re_embed
                __magic_name__ = self.re_embed + 1
            print(
                F'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
                F'''Using {self.unknown_index} for unknown indices.''' )
        else:
            __magic_name__ = n_e
        __magic_name__ = sane_index_shape
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict ) -> Union[str, Any]:
        """Remap raw codebook indices to the reduced 'used' index set."""
        __magic_name__ = inds.shape
        assert len(UpperCamelCase__ ) > 1
        __magic_name__ = inds.reshape(ishape[0] , -1 )
        __magic_name__ = self.used.to(UpperCamelCase__ )
        __magic_name__ = (inds[:, :, None] == used[None, None, ...]).long()
        __magic_name__ = match.argmax(-1 )
        # rows with no match in `used` are "unknown"
        __magic_name__ = match.sum(2 ) < 1
        if self.unknown_index == "random":
            __magic_name__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
        else:
            __magic_name__ = self.unknown_index
        return new.reshape(UpperCamelCase__ )
    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple:
        """Inverse of the remap: expand reduced indices back to the full codebook."""
        __magic_name__ = inds.shape
        assert len(UpperCamelCase__ ) > 1
        __magic_name__ = inds.reshape(ishape[0] , -1 )
        __magic_name__ = self.used.to(UpperCamelCase__ )
        if self.re_embed > self.used.shape[0]: # extra token
            __magic_name__ = 0 # simply set to zero
        __magic_name__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCamelCase__ )
        return back.reshape(UpperCamelCase__ )
    def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] ) -> List[str]:
        """Quantize ``z``; returns (z_q, commitment loss, (perplexity, encodings, indices))."""
        # NCHW -> NHWC, then flatten to (N*H*W, vq_embed_dim)
        __magic_name__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
        __magic_name__ = z.view(-1 , self.vq_embed_dim )
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        __magic_name__ = torch.argmin(torch.cdist(UpperCamelCase__ , self.embedding.weight ) , dim=1 )
        __magic_name__ = self.embedding(UpperCamelCase__ ).view(z.shape )
        __magic_name__ = None
        __magic_name__ = None
        # compute loss for embedding; legacy flag swaps which term carries beta
        if not self.legacy:
            __magic_name__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            __magic_name__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
        # preserve gradients (straight-through estimator)
        __magic_name__ = z + (z_q - z).detach()
        # reshape back to match original input shape
        __magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        if self.remap is not None:
            __magic_name__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
            __magic_name__ = self.remap_to_used(UpperCamelCase__ )
            __magic_name__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
        if self.sane_index_shape:
            __magic_name__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] ) -> int:
        """Look up codebook vectors for given indices and reshape to ``shape`` (NCHW)."""
        if self.remap is not None:
            __magic_name__ = indices.reshape(shape[0] , -1 ) # add batch axis
            __magic_name__ = self.unmap_to_all(UpperCamelCase__ )
            __magic_name__ = indices.reshape(-1 ) # flatten again
        # get quantized latent vectors
        __magic_name__ = self.embedding(UpperCamelCase__ )
        if shape is not None:
            __magic_name__ = z_q.view(UpperCamelCase__ )
            # reshape back to match original input shape
            __magic_name__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
        return z_q
class UpperCAmelCase_ ( _A ):
    """Diagonal Gaussian over latents, parameterized by a tensor holding (mean, logvar).

    NOTE(review): the base class name ``_A`` is undefined in this file, and all
    locals are rebound to ``__magic_name__`` while later code reads ``self.mean`` /
    ``self.logvar`` / ``self.std`` / ... -- obfuscation damage; restore names.
    """
    def __init__( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ) -> Optional[int]:
        """Split ``parameters`` into mean/logvar along dim 1 and precompute std/var."""
        __magic_name__ = parameters
        __magic_name__ , __magic_name__ = torch.chunk(UpperCamelCase__ , 2 , dim=1 )
        # clamp logvar for numerical stability before exponentiating
        __magic_name__ = torch.clamp(self.logvar , -30.0 , 20.0 )
        __magic_name__ = deterministic
        __magic_name__ = torch.exp(0.5 * self.logvar )
        __magic_name__ = torch.exp(self.logvar )
        if self.deterministic:
            # zero std/var makes sample() return the mean exactly
            __magic_name__ = __magic_name__ = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def _lowercase ( self : Tuple , UpperCamelCase__ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
        """Draw a reparameterized sample: mean + std * eps."""
        __magic_name__ = randn_tensor(
            self.mean.shape , generator=UpperCamelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
        __magic_name__ = self.mean + self.std * sample
        return x
    def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int]=None ) -> Any:
        """KL divergence to ``other`` (or to the standard normal when ``other`` is None)."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict=[1, 2, 3] ) -> Optional[int]:
        """Negative log-likelihood of ``sample`` under this Gaussian, summed over ``dims``."""
        if self.deterministic:
            return torch.Tensor([0.0] )
        __magic_name__ = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCamelCase__ )
    def _lowercase ( self : Optional[int] ) -> Optional[int]:
        """Distribution mode (equals the mean for a Gaussian)."""
        return self.mean
| 88 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCAmelCase_ ):
A_ = ["flax", "transformers"]
def __init__( self , *__a , **__a ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class __UpperCamelCase ( metaclass=lowerCAmelCase_ ):
A_ = ["flax", "transformers"]
def __init__( self , *__a , **__a ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class __UpperCamelCase ( metaclass=lowerCAmelCase_ ):
A_ = ["flax", "transformers"]
def __init__( self , *__a , **__a ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
class __UpperCamelCase ( metaclass=lowerCAmelCase_ ):
A_ = ["flax", "transformers"]
def __init__( self , *__a , **__a ):
'''simple docstring'''
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __UpperCAmelCase ( cls , *__a , **__a ):
'''simple docstring'''
requires_backends(cls , ['flax', 'transformers'] )
| 27 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
    """Model tester holding the tiny MaskFormerSwin config and the shared check methods.

    NOTE(review): obfuscation damage as elsewhere in this file -- the ``__init__``
    parameters all share the duplicate name ``UpperCamelCase__`` (a SyntaxError) and
    every assignment rebinds ``__magic_name__`` while later code reads the intended
    attributes (``self.batch_size``, ``self.image_size``, ...). Restore names.
    """
    def __init__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Any=[1, 2, 1] , UpperCamelCase__ : int=[2, 2, 4] , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=2.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=True , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=10 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Tuple=["stage1", "stage2", "stage3"] , UpperCamelCase__ : Tuple=[1, 2, 3] , ) -> Dict:
        """Store every hyper-parameter of the tiny test model on the tester instance."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = image_size
        __magic_name__ = patch_size
        __magic_name__ = num_channels
        __magic_name__ = embed_dim
        __magic_name__ = depths
        __magic_name__ = num_heads
        __magic_name__ = window_size
        __magic_name__ = mlp_ratio
        __magic_name__ = qkv_bias
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = drop_path_rate
        __magic_name__ = hidden_act
        __magic_name__ = use_absolute_embeddings
        __magic_name__ = patch_norm
        __magic_name__ = layer_norm_eps
        __magic_name__ = initializer_range
        __magic_name__ = is_training
        __magic_name__ = scope
        __magic_name__ = use_labels
        __magic_name__ = type_sequence_label_size
        __magic_name__ = encoder_stride
        __magic_name__ = out_features
        __magic_name__ = out_indices
    def _lowercase ( self : str ) -> Optional[int]:
        """Build random pixel values (and optional labels) plus a fresh config."""
        __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __magic_name__ = self.get_config()
        return config, pixel_values, labels
    def _lowercase ( self : Tuple ) -> str:
        """Return a MaskFormerSwinConfig built from the tester's hyper-parameters."""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ) -> List[str]:
        """Run the base model and check the last hidden state's sequence length and width."""
        __magic_name__ = MaskFormerSwinModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ )
        # each of the (len(depths) - 1) stages halves the spatial resolution
        __magic_name__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __magic_name__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def _lowercase ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ) -> Tuple:
        """Run the backbone and check feature maps, channels, and the invalid-stage error."""
        __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(UpperCamelCase__ ):
            __magic_name__ = ["""stem"""]
            __magic_name__ = MaskFormerSwinBackbone(config=UpperCamelCase__ )
    def _lowercase ( self : Any ) -> Any:
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        __magic_name__ = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs
        __magic_name__ = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    """Model-level tests for MaskFormerSwin (model and backbone variants).

    NOTE(review): identifiers in this class look machine-mangled -- many locals
    are literally named ``__magic_name__`` and several names the code reads
    (``model``, ``x``, ``config``, ``inputs_dict`` ...) are never bound.
    Comments below flag spots where the code, as written, cannot run.
    """

    # Model classes exercised by the common test mixin (torch only).
    a__ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline task -> model mapping for the pipeline test mixin.
    a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _lowercase ( self : Any ) -> List[str]:
        """Create the shared model tester and config tester.

        NOTE(review): both results are bound to ``__magic_name__`` while later
        methods read ``self.model_tester`` / ``self.config_tester``, and
        ``UpperCamelCase__`` (presumably the config class) is unbound here.
        """
        __magic_name__ = MaskFormerSwinModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _lowercase ( self : List[str] ) -> Optional[int]:
        """Skipped: the extra output is incompatible with nn.DataParallel."""
        pass

    def _lowercase ( self : str ) -> Dict:
        """Run the standard battery of config serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowercase ( self : Optional[int] ) -> List[str]:
        """Intentionally empty (placeholder that disables an inherited test)."""
        return

    def _lowercase ( self : str ) -> str:
        """Run the model creation/shape check defined on the model tester."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : int ) -> Optional[Any]:
        """Run the backbone creation/feature-map check on the model tester."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCamelCase__ )

    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _lowercase ( self : Any ) -> int:
        """Skipped: not applicable to Swin."""
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _lowercase ( self : str ) -> List[Any]:
        """Skipped: not applicable to Swin."""
        pass

    def _lowercase ( self : Union[str, Any] ) -> Dict:
        """Check input/output embedding accessors on every model class.

        NOTE(review): ``model`` and ``x`` are read but never bound -- the
        instantiation results were obfuscated into ``__magic_name__``.
        """
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __magic_name__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )

    def _lowercase ( self : Tuple ) -> Dict:
        """Check that every model's forward signature starts with pixel_values.

        NOTE(review): ``model``, ``signature`` and ``arg_names`` are read but
        never bound in this obfuscated body.
        """
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            __magic_name__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ = [*signature.parameters.keys()]
            __magic_name__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def _lowercase ( self : Tuple ) -> int:
        """Skipped: attentions are not exposed."""
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def _lowercase ( self : List[str] ) -> Dict:
        """Skipped: only relevant for standalone models."""
        pass

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
        """Helper: run a model with output_hidden_states and verify shapes.

        NOTE(review): four identical ``UpperCamelCase__`` parameters are a
        SyntaxError in Python, and the body reads unbound names (``model``,
        ``outputs``, ``hidden_states``, ``config``, ``image_size``) --
        presumably originally (model_class, config, inputs_dict, image_size).
        """
        __magic_name__ = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        with torch.no_grad():
            __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
        __magic_name__ = outputs.hidden_states
        __magic_name__ = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
        # Swin has a different seq_length
        __magic_name__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def _lowercase ( self : Dict ) -> Dict:
        """Verify hidden states both via explicit kwarg and via config flag."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
        """Same as the previous test, but with an input padded off the grid."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = 3
        __magic_name__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __magic_name__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Smallest size strictly larger than image_size on each axis that is
        # congruent with the patch grid.
        __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def _lowercase ( self : Optional[int] ) -> int:
        """Skipped: no public checkpoints to load."""
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : List[str] ) -> List[Any]:
        """Skipped pending migration to native Swin."""
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : Dict ) -> Optional[Any]:
        """Skipped pending migration to native Swin."""
        pass

    def _lowercase ( self : Dict ) -> Any:
        """Check tuple vs. dict (return_dict) output equivalence for all models."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ):
            # NOTE(review): as written this assigns 0 to a throwaway local and
            # returns the unbound name ``t`` -- the NaN masking was lost.
            __magic_name__ = 0
            return t

        def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
            # NOTE(review): four identical parameter names are a SyntaxError,
            # and ``model`` / ``tuple_object`` / ``dict_object`` are unbound.
            with torch.no_grad():
                __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
                __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()

            def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
                if isinstance(UpperCamelCase__ , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                            F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
                            F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
                        ) , )

            recursive_check(UpperCamelCase__ , UpperCamelCase__ )

        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
    """Backbone-mixin tests for MaskFormerSwinBackbone.

    NOTE(review): locals in the test body were obfuscated to
    ``__magic_name__`` while later lines read ``backbone``, ``outputs``,
    ``batch_size`` etc. -- flagged below.
    """

    a__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    a__ = MaskFormerSwinConfig

    def _lowercase ( self : Union[str, Any] ) -> List[Any]:
        """Create the shared model tester.

        NOTE(review): bound to ``__magic_name__`` but read back as
        ``self.model_tester`` in the next method.
        """
        __magic_name__ = MaskFormerSwinModelTester(self )

    def _lowercase ( self : List[str] ) -> Optional[Any]:
        """Exercise the backbone's default, hidden-state and attention outputs."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            __magic_name__ = backbone_class(UpperCamelCase__ )
            backbone.to(UpperCamelCase__ )
            backbone.eval()
            __magic_name__ = backbone(**UpperCamelCase__ )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                # NOTE(review): assertTrue on a tuple is always truthy -- this
                # was probably meant to be an equality assertion.
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            __magic_name__ = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    __magic_name__ , __magic_name__ , __magic_name__ = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                __magic_name__ = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
                self.assertIsNotNone(outputs.attentions )
| 88 | 0 |
'''Convert original MobileViTv2 checkpoints to the Hugging Face Transformers format.'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Dict = logging.get_logger(__name__)
def load_orig_config_file(config_file_path):
    """Load the original MobileViTv2 YAML config file.

    Returns an ``argparse.Namespace`` whose attributes are the flattened,
    dot-joined keys of the YAML mapping (e.g. ``model.classification.name``).

    NOTE(review): the obfuscated original gave the inner helper three identical
    ``A__`` parameters (a SyntaxError) while its body read ``d``,
    ``parent_key`` and ``sep`` -- those are restored here. The function name is
    restored from its call site (``load_orig_config_file``).
    """
    print('Loading config file...')

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into {"a.b.c": value} pairs.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_file_path, 'r') as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_file_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a ``MobileViTVaConfig`` for the given conversion task.

    Args:
        task_name: one of the supported task identifiers (imagenet1k_*,
            imagenet21k_to_1k_*, ade20k_*, voc_*).
        orig_cfg_file: path to the original YAML config file.

    NOTE(review): the obfuscated original had two identical ``A__`` parameters
    (a SyntaxError) and every local bound to ``UpperCamelCase``; names and
    config attributes were reconstructed from the branch logic and the
    label-file handling visible in the body -- review against the upstream
    conversion script before relying on exact attribute names.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_'):
        config.num_labels = 1_000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_'):
        config.num_labels = 21_000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_'):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_'):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name', -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier', 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer', -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name', 'swish')
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride', 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels', 512)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout', 0.1)
    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    """Move the value stored under ``old`` to ``new`` inside ``dct`` (in place).

    NOTE(review): the obfuscated original had three identical ``A__``
    parameters (a SyntaxError) and rebound a local instead of writing
    ``dct[new]``; the function name is restored from its call site.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Map original MobileViTv2 checkpoint keys to Hugging Face names.

    Args:
        state_dict: the original checkpoint's state dict (only keys are read).
        base_model: when True, omit the ``mobilevitv2.`` prefix (the checkpoint
            is loaded into a bare backbone instead of a task head model).

    Returns:
        A list of ``(old_key, new_key)`` pairs.

    NOTE(review): the obfuscated original had two identical ``A__`` parameters
    (a SyntaxError); the body already read ``state_dict``, ``base_model``,
    ``model_prefix``, ``k_new`` and ``rename_keys``, which grounds this
    reconstruction.
    """
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        # Strip the leading "encoder." segment, if present.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.', '.')
        if ".conv." in k:
            k_new = k_new.replace('.conv.', '.convolution.')
        if ".norm." in k:
            k_new = k_new.replace('.norm.', '.normalization.')
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.', F"""{model_prefix}conv_stem.""")
        for i in [1, 2]:
            if F"""layer_{i}.""" in k:
                k_new = k_new.replace(F"""layer_{i}.""", F"""{model_prefix}encoder.layer.{i-1}.layer.""")
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.', '.expand_1x1.')
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.', '.reduce_1x1.')
        for i in [3, 4, 5]:
            if F"""layer_{i}.0.""" in k:
                k_new = k_new.replace(F"""layer_{i}.0.""", F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""")
            if F"""layer_{i}.1.local_rep.0.""" in k:
                k_new = k_new.replace(F"""layer_{i}.1.local_rep.0.""", F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""")
            if F"""layer_{i}.1.local_rep.1.""" in k:
                k_new = k_new.replace(F"""layer_{i}.1.local_rep.1.""", F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""")
        for i in [3, 4, 5]:
            # Number of transformer blocks in each MobileViT stage.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F"""layer_{i}.1.global_rep.{j}.""" in k:
                    k_new = k_new.replace(
                        F"""layer_{i}.1.global_rep.{j}.""", F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""")
            # The entry after the last transformer block is the final layernorm.
            if F"""layer_{i}.1.global_rep.{j+1}.""" in k:
                k_new = k_new.replace(
                    F"""layer_{i}.1.global_rep.{j+1}.""", F"""{model_prefix}encoder.layer.{i-1}.layernorm.""")
            if F"""layer_{i}.1.conv_proj.""" in k:
                k_new = k_new.replace(F"""layer_{i}.1.conv_proj.""", F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.', 'layernorm_before.')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.', 'attention.')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.')
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.', 'classifier.')
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.', 'segmentation_head.')
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.', '.')
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.', '.')
        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head weights (``seg_head.aux_head.*``) from ``state_dict``
    in place -- the HF model has no auxiliary head.

    NOTE(review): the obfuscated original read ``state_dict`` and
    ``keys_to_ignore`` without binding them; names restored accordingly, and
    the function name restored from its call site.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.'):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO test image (two cats) used to sanity-check
    converted vision models, and return it as a PIL image.

    NOTE(review): function name restored from its call site; the obfuscated
    original read an unbound ``url`` local.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, orig_checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTv2 checkpoint to HF format and save the
    model plus image processor to ``pytorch_dump_folder_path``.

    NOTE(review): the obfuscated original had four identical ``A__`` parameters
    (a SyntaxError); parameter names follow the argparse attributes passed at
    the call site (``args.task``, ``args.orig_checkpoint_path``,
    ``args.orig_config_path``, ``args.pytorch_dump_folder_path``).
    """
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(orig_checkpoint_path, map_location='cpu')
    # load huggingface model
    if task_name.startswith('ade20k_') or task_name.startswith('voc_'):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith('imagenet'):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256') and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    # NOTE(review): the parser is bound to ``_lowerCamelCase`` but the code
    # below reads ``parser`` and ``args`` -- locals were renamed by an
    # obfuscation pass.
    _lowerCamelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    _lowerCamelCase : Tuple = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 28 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer value and optional children.

    NOTE(review): the class name is restored from the ``Node`` annotation used
    by the tree class below; the obfuscated original assigned ``value`` to a
    throwaway local instead of ``self.value``.
    """

    def __init__(self, value: int) -> None:
        self.value = value
        # Children are filled in by the caller; None marks a missing subtree.
        self.left = None
        self.right = None
class UpperCAmelCase_:
    """Sums every node value in a binary tree via recursive depth-first search.

    Iterating an instance yields a single item: the total of all node values.
    NOTE(review): the DFS method was obfuscated to ``_lowercase`` while both
    call sites used ``self.depth_first_search`` -- the grounded name is
    restored; ``self.tree`` was likewise never assigned.
    """

    def __init__(self, tree: "Node") -> None:
        self.tree = tree

    def depth_first_search(self, node: "Node | None") -> int:
        """Return the sum of values in the subtree rooted at ``node`` (0 if empty)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 88 | 0 |
def combination_util(arr, n, r, index, data, i):
    """Recursively print all combinations of size ``r`` from ``arr[:n]``.

    Args:
        arr: source elements.
        n: number of elements of ``arr`` to consider.
        r: combination size.
        index: next free slot in ``data``.
        data: scratch list of length ``r`` holding the current combination.
        i: index of the next candidate element in ``arr``.

    NOTE(review): the obfuscated original declared six identical
    ``__snake_case`` parameters (a SyntaxError); names are restored from the
    recursive calls, which already used ``combination_util``, ``index`` and
    ``i``.
    """
    if index == r:
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
def print_combination(arr, n, r):
    """Print all combinations of size ``r`` drawn from the first ``n`` elements
    of ``arr``. Mainly delegates to ``combination_util()``.

    NOTE(review): name restored from the driver call
    ``print_combination(arr, len(arr), 3)``; the obfuscated original declared
    three identical ``__snake_case`` parameters (a SyntaxError).
    """
    # Temporary array 'data[]' holds the combination being built.
    data = [0] * r
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above.
    # NOTE(review): the obfuscated original assigned the list to
    # ``__UpperCAmelCase`` but then read ``arr`` -- restored here.
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 29 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the Funnel Transformer package.
# NOTE(review): the obfuscation pass turned every
# ``_import_structure[...] = [...]`` assignment into a plain rebinding of one
# variable, and the final ``_LazyModule`` was never installed into
# ``sys.modules`` -- both are restored here (``_import_structure`` is the name
# the surviving ``_LazyModule(...)`` call already referenced).
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_funnel'] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_funnel'] = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Direct imports for type checkers only.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
# Module-level logger; the backend picker below reads ``logger.info``.
# NOTE(review): the obfuscated original bound this to ``__a``, leaving
# ``logger`` unbound at its use site.
logger = logging.get_logger(__name__)
class lowercase__:
    """Base class for hyperparameter-search backends (Optuna, Ray, SigOpt, W&B).

    NOTE(review): all methods were obfuscated to ``_lowercase``; the names
    below are restored from the body of ``ensure_available``, which calls
    ``self.is_available()`` / ``self.pip_install()`` and reads ``self.name``
    and ``cls.pip_package``.
    """

    # Backend identifier (e.g. "optuna"); set by subclasses.
    name: str
    # pip package to install when it differs from `name`; None means use `name`.
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return True when the backend's package is importable."""
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Run the hyperparameter search; implemented by subclasses."""
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the backend's default hyperparameter space for `trial`."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise a helpful RuntimeError when the backend is not installed."""
        if not self.is_available():
            raise RuntimeError(
                f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''')

    @classmethod
    def pip_install(cls):
        """Return the pip command string that installs this backend."""
        return f'''`pip install {cls.pip_package or cls.name}`'''
class OptunaBackend(lowercase__):
    """Hyperparameter-search backend backed by Optuna.

    NOTE(review): class name restored from the backend registry dict below
    (``[OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]``); the
    obfuscated original inherited an undefined ``UpperCAmelCase`` -- the base
    class defined above is substituted.
    """

    name = 'optuna'

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(lowercase__):
    """Hyperparameter-search backend backed by Ray Tune.

    NOTE(review): class name restored from the backend registry dict below;
    the undefined ``UpperCAmelCase`` base is replaced with the base class.
    """

    name = 'ray'
    # The extra is quoted so the generated pip command works in all shells.
    pip_package = '\'ray[tune]\''

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(lowercase__):
    """Hyperparameter-search backend backed by SigOpt.

    NOTE(review): class name restored from the backend registry dict below;
    the undefined ``UpperCAmelCase`` base is replaced with the base class.
    """

    name = 'sigopt'

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(lowercase__):
    """Hyperparameter-search backend backed by Weights & Biases sweeps.

    NOTE(review): class name restored from the backend registry dict below;
    the undefined ``UpperCAmelCase`` base is replaced with the base class.
    """

    name = 'wandb'

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
# Registry mapping each HPSearchBackend enum member to its backend class.
# NOTE(review): the obfuscated original bound this dict to ``__a`` while the
# picker function below reads ``ALL_HYPERPARAMETER_SEARCH_BACKENDS`` -- the
# grounded name is restored.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a():
    """Pick the default hyperparameter-search backend.

    Returns the name of the first installed backend (logging a note when more
    than one is available) and raises RuntimeError with install hints when
    none is installed.

    NOTE(review): locals restored from the unbound names the obfuscated body
    read (``available_backends``, ``name``).
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''')
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
| 30 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def check_results_dict_not_empty(self, results):
    """Assert that every (batch_size, sequence_length) cell in a benchmark
    results dict holds a non-None value.

    NOTE(review): method name restored from its call sites
    (``self.check_results_dict_not_empty(...)``); the obfuscated original's
    body read the unbound names ``results`` and ``result``.
    """
    for model_result in results.values():
        for batch_size, sequence_length in zip(model_result["""bs"""], model_result["""ss"""]):
            result = model_result["""result"""][batch_size][sequence_length]
            self.assertIsNotNone(result)
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
    """Benchmark tiny GPT-2 inference in eager mode and check the results.

    NOTE(review): the locals were obfuscated to ``__magic_name__`` while the
    code reads ``MODEL_ID``, ``benchmark`` and ``results``, and the boolean
    keyword values were replaced by the unbound ``UpperCamelCase__`` -- the
    original True/False values cannot be recovered from this view.
    """
    __magic_name__ = """sshleifer/tiny-gpt2"""
    __magic_name__ = TensorFlowBenchmarkArguments(
        models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
    __magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
    __magic_name__ = benchmark.run()
    self.check_results_dict_not_empty(results.time_inference_result )
    self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """sgugger/tiny-distilbert-classification"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , [config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = """patrickvonplaten/t5-tiny-random"""
__magic_name__ = AutoConfig.from_pretrained(UpperCamelCase__ )
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ , configs=[config] )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , """env.csv""" ) , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """env.csv""" ) ).exists() )
def _lowercase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(UpperCamelCase__ : Dict ):
self.assertTrue(hasattr(UpperCamelCase__ , """sequential""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """cumulative""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """current""" ) )
self.assertTrue(hasattr(UpperCamelCase__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , """log.txt""" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , eager_mode=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
__magic_name__ = TensorFlowBenchmark(UpperCamelCase__ )
__magic_name__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , """log.txt""" ) ).exists() )
| 88 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_(unittest.TestCase):
    """Fast (CPU-friendly) checks for the ScoreSdeVe pipeline with a tiny random UNet."""

    @property
    def dummy_uncond_unet(self):
        """A tiny deterministically-seeded unconditional UNet used as the backbone."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        # Re-seed so the tuple-return call produces exactly the same sample.
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test running the full pretrained ncsnpp-church pipeline.

    Renamed from a duplicate `lowerCamelCase_` that shadowed the fast-test
    class above and prevented its tests from being discovered.
    """

    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 31 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative letter frequencies (percent) of typical English text, taken from
# https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75,
    'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78,
    'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97,
    'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15,
    'Q': 0.10, 'Z': 0.07,
}
# The 26 letters ordered from most to least frequent in English.
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def get_letter_count(message):
    """Count occurrences of each of the 26 letters in `message`, case-insensitively."""
    letter_count = {letter: 0 for letter in LETTERS}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    """Sort key: the first element of a (frequency, letters) pair."""
    return x[0]


def get_frequency_order(message):
    """Return the 26 letters ordered from most to least frequent in `message`.

    Within a frequency tie, letters are ordered by reversed ETAOIN rank, so
    the result can be compared directly against ETAOIN.

    >>> get_frequency_order('Hello World')
    'LOWDRHEZQXJKVBPYGFMUCSNIAT'
    """
    letter_to_freq = get_letter_count(message)
    # Group letters by how often they occur in the message.
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    # Inside each frequency bucket, sort by reversed ETAOIN rank, then
    # flatten each bucket to a string.
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    # Highest-frequency buckets come first in the final ordering.
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message):
    """Score 0-12: how many of the message's six most/least frequent letters
    coincide with English's six most/least frequent letters.

    >>> english_freq_match_score('Hello World')
    1
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def SCREAMING_SNAKE_CASE_(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Warn about deprecated arguments/attributes and collect their values.

    Each positional item is an ``(attribute, removal_version, message)`` tuple
    (a single bare triple is also accepted). ``take_from`` may be a kwargs
    dict (values are popped) or an object (values are read via getattr).

    Raises ValueError when the library version has already reached the
    removal version, and TypeError when unexpected kwargs remain in
    ``take_from``. Returns nothing, a single value, or a tuple of values.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}"""
            )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            # Without the standard prefix, only the caller-supplied message is shown.
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Blame the caller's line, mimicking Python's own unexpected-kwarg error.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 32 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs mirrored on the Hugging Face Google Cloud Storage
# bucket. Read below as DATASETS_ON_HF_GCP, so it must carry that name.
DATASETS_ON_HF_GCP = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True, datasets=None):
    """Build `parameterized.named_parameters` cases from the GCP dataset list.

    With `with_config=True` each (dataset, config) pair becomes one case;
    otherwise one case per distinct dataset. `datasets` can override the
    module-level DATASETS_ON_HF_GCP (useful for testing). Renamed from `a__`
    to match the class decorator below.
    """
    if datasets is None:
        datasets = DATASETS_ON_HF_GCP
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in datasets
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in datasets}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class UpperCAmelCase_(TestCase):
    """For each mirrored (dataset, config) pair, check that the dataset-info
    file is reachable on the HF GCS bucket. The base class was an undefined
    `_A` and the test method reused one parameter name twice (SyntaxError);
    both were restored.
    """

    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash, )
            # Build the GCS URL for this builder's dataset-info file.
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, """/"""),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Download a prepared wikipedia config from HF GCS and load it as a Dataset.

    Renamed from `a__` (which was shadowed by the next function and, lacking a
    `test_` prefix, was never collected by pytest).
    """
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""") / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name="""20220301.frr""", hash=dataset_module.hash, )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Stream a wikipedia config from HF GCS and sanity-check the result types.

    Renamed from a second `a__` so pytest collects it and it no longer
    shadows the previous test.
    """
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name="""20220301.frr""", hash=dataset_module.hash, )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["""train"""], IterableDataset)
    assert next(iter(ds["""train"""]))
| 88 | 0 |
"""simple docstring"""
def lowercase(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character.

    Characters are taken alternately from `first_str` and `second_str`; once
    the shorter string is exhausted, the remainder of the longer one is
    appended. (The original signature used the same name for both parameters,
    which is a SyntaxError.)

    >>> lowercase('AB', 'XYZ')
    'AXBYZ'
    """
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    # Walk up to the longer length so trailing characters are preserved.
    abs_length: int = max(first_str_length, second_str_length)
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
| 33 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class UpperCAmelCase_(unittest.TestCase):
    """Regression test: an optimizer wrapped by `accelerator.prepare` must
    survive a pickle round-trip (the wrapped optimizer was previously passed
    as an undefined name, so the test could never run)."""

    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 88 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher CLI: `--num_cores`, the training-script path, and
    everything after it (forwarded verbatim to the script).

    Renamed from a duplicated `snake_case_` so the `parse_args()` call in
    `main` resolves.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''')
    # positional
    parser.add_argument(
        '''training_script''', type=str, help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ), )
    # rest from the training program: REMAINDER captures all trailing args.
    parser.add_argument('''training_script_args''', nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn `--num_cores` TPU processes running the target script's `_mp_fn`.

    Renamed from a duplicated `snake_case_` so the `main()` call in the
    __main__ guard resolves.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned module sees its own CLI plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 34 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    """Tokenize each HumanEval prompt once and yield it `n_copies` times,
    so downstream generation samples each task repeatedly.

    Renamed from a duplicated `UpperCAmelCase_` to match the reference in
    `main` (the duplicate name also shadowed the criteria class below).
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="""pt""")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stopping criterion: halt generation once every sequence in the batch
    contains one of `eof_strings` after the prompt (`start_length` tokens).

    Renamed from a duplicated `UpperCAmelCase_` to match the reference in
    `main`'s stopping-criteria list.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True only when every generated continuation contains an EOF marker."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string, eof_strings=None):
    """Truncate generated code at its final EOF marker.

    `eof_strings` defaults to the module-level ``EOF_STRINGS``; it is exposed
    as a parameter so the helper can be used (and tested) with a custom
    marker set. Renamed from `a__` to match the call in `complete_code`.
    """
    if eof_strings is None:
        eof_strings = EOF_STRINGS
    string_list = re.split("""(%s)""" % """|""".join(eof_strings), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per HumanEval task and return them as
    a list of lists (one inner list of decoded, truncated strings per task).

    Renamed from `a__` (duplicate-name shadowing) and restored: the original
    signature reused the parameter name `A_` for every argument, which is a
    SyntaxError.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # Tell the stopping criterion where the prompt ends for this batch.
            gen_kwargs["stopping_criteria"][0].start_length = batch["""ids"""].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["""ids"""][:, : batch["""input_len"""]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["""task_id"""].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Run the HumanEval benchmark: generate completions with a causal LM and
    score them with the `code_eval` metric, writing pass@k results to JSON.

    Renamed from `a__` so the __main__ guard's `main()` call resolves; the
    collapsed `__magic_name__` assignments (including the lost `os.environ`
    targets) were restored.
    """
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = """false"""
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        """do_sample""": args.do_sample,
        """temperature""": args.temperature,
        """max_new_tokens""": args.max_new_tokens,
        """top_p""": args.top_p,
        """top_k""": args.top_k,
        """stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("""openai_humaneval""")
    code_eval_metric = load_metric("""code_eval""")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["""test"""], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]])
    except ValueError as exception:
        print(
            """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
            """ flag to enable code evaluation.""" )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["""test"""][task]["""test"""]
            entry_point = f'''check({human_eval['test'][task]['entry_point']})'''
            references.append("""\n""" + test_func + """\n""" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, """w""") as fp:
            json.dump(pass_at_k, fp)


# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 88 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold (standard transformers pattern): declare the public
# submodule structure up front, then install a _LazyModule so heavy imports
# only happen on attribute access. The structure dict must be a single named
# object — the previous `__a = [...]` overwrote it instead of extending it.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose configs/tokenizer only.
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    # Replace this module with the lazy proxy at import time.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher CLI: `--num_cores`, the training-script path, and
    everything after it (forwarded verbatim to the script).

    Renamed from a duplicated `a__` so the `parse_args()` call in `main`
    resolves; the undefined `A_` placeholders were restored to `int`/`str`/
    `REMAINDER`.
    """
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ), )
    # rest from the training program: REMAINDER captures all trailing args.
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn `--num_cores` TPU processes running the target script's `_mp_fn`.

    Renamed from a duplicated `a__` so the `main()` call in the __main__
    guard resolves.
    """
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the spawned module sees its own CLI plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 88 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BICUBIC, __a = True, __a = True, __a = 1 / 255, __a = None, __a = True, __a = None, __a = None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Optional[int] = size if size is not None else {"height": 224, "width": 224}
_lowerCAmelCase : Optional[Any] = get_size_dict(__a)
_lowerCAmelCase : str = crop_size if crop_size is not None else {"height": 224, "width": 224}
_lowerCAmelCase : Tuple = get_size_dict(__a, default_to_square=__a, param_name="crop_size")
_lowerCAmelCase : Optional[int] = do_resize
_lowerCAmelCase : Optional[int] = do_rescale
_lowerCAmelCase : List[str] = do_normalize
_lowerCAmelCase : int = do_center_crop
_lowerCAmelCase : int = crop_size
_lowerCAmelCase : List[Any] = size
_lowerCAmelCase : Union[str, Any] = resample
_lowerCAmelCase : Union[str, Any] = rescale_factor
_lowerCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCAmelCase : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def snake_case__ ( self, __a, __a, __a = PILImageResampling.BILINEAR, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = get_size_dict(__a)
if "shortest_edge" in size:
_lowerCAmelCase : Dict = get_resize_output_image_size(__a, size=size["shortest_edge"], default_to_square=__a)
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_lowerCAmelCase : int = (size["height"], size["width"])
else:
raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
return resize(__a, size=__a, resample=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
return center_crop(__a, size=(size["height"], size["width"]), data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a):
'''simple docstring'''
return rescale(__a, scale=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return normalize(__a, mean=__a, std=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Union[str, Any] = get_size_dict(__a, param_name="crop_size", default_to_square=__a)
_lowerCAmelCase : int = resample if resample is not None else self.resample
_lowerCAmelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : str = image_std if image_std is not None else self.image_std
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : Union[str, Any] = get_size_dict(__a)
if not is_batched(__a):
_lowerCAmelCase : int = [images]
if not valid_images(__a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : str = [to_numpy_array(__a) for image in images]
if do_resize:
_lowerCAmelCase : Union[str, Any] = [self.resize(image=__a, size=__a, resample=__a) for image in images]
if do_center_crop:
_lowerCAmelCase : Optional[int] = [self.center_crop(image=__a, size=__a) for image in images]
if do_rescale:
_lowerCAmelCase : Dict = [self.rescale(image=__a, scale=__a) for image in images]
if do_normalize:
_lowerCAmelCase : Optional[int] = [self.normalize(image=__a, mean=__a, std=__a) for image in images]
_lowerCAmelCase : str = [to_channel_dimension_format(__a, __a) for image in images]
_lowerCAmelCase : str = {"pixel_values": images}
return BatchFeature(data=__a, tensor_type=__a)
| 36 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_ ( _A ):
    """
    Configuration for a PEGASUS encoder-decoder model.

    Stores the hyper-parameters of the architecture (layer counts, hidden
    sizes, dropout rates, ...) and the special-token ids forwarded to the
    `_A` (PretrainedConfig-style) base class.
    """

    # NOTE(review): these three class attributes were all mangled to `a__`
    # (only the last survived); canonical names restored.
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        # NOTE(review): the original parameters were all named `UpperCamelCase__`
        # (a SyntaxError) and every assignment targeted `__magic_name__`;
        # names restored in the original order of defaults.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """Alias matching `attribute_map`: heads of the encoder attention."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias matching `attribute_map`: the model dimension."""
        return self.d_model
| 88 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 1000 ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = 1, 1
lowerCAmelCase__ : int = []
for i in range(1 , n + 1 ):
lowerCAmelCase__ : List[str] = prev_numerator + 2 * prev_denominator
lowerCAmelCase__ : List[Any] = prev_numerator + prev_denominator
if len(str(UpperCamelCase ) ) > len(str(UpperCamelCase ) ):
result.append(UpperCamelCase )
lowerCAmelCase__ : str = numerator
lowerCAmelCase__ : Union[str, Any] = denominator
return len(UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): the solver in this module is named _SCREAMING_SNAKE_CASE
    # (mangled); the original `solution` symbol does not exist here.
    print(f"{_SCREAMING_SNAKE_CASE() = }")
| 37 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> 
exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """Exact-match metric: percentage of predictions equal to their reference."""

    def _info(self):
        # NOTE(review): both methods were mangled to `_lowercase`, but
        # datasets.Metric dispatches to `_info`/`_compute`; canonical names
        # restored. `_DESCRIPTION`/`_CITATION`/`_KWARGS_DESCRIPTION` are the
        # canonical module constants (mangled to `__lowerCAmelCase` above —
        # restore those bindings as well).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with rate in [0.0, 100.0]."""
        # NOTE(review): parameters were all mangled to `UpperCamelCase__`
        # (a SyntaxError); names restored from the reads in the body.
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=table)
            references = np.char.translate(references, table=table)
        if ignore_numbers:
            table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=table)
            references = np.char.translate(references, table=table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 88 | 0 |
import argparse
import datetime
def SCREAMING_SNAKE_CASE_ ( date_input: str ) -> str:
    """
    Return which day of the week a date falls on, using Zeller's congruence.

    Args:
        date_input: 10-character date string in ``mm-dd-yyyy`` or
            ``mm/dd/yyyy`` form.

    Returns:
        A sentence naming the weekday, e.g. ``"Your date 01-31-2010, is a Sunday!"``.

    Raises:
        ValueError: on malformed input or out-of-range fields.
        AssertionError: if the Zeller result disagrees with ``datetime``
            (would indicate a bug in the arithmetic below).
    """
    # NOTE(review): all locals were mangled to `UpperCamelCase` while the body
    # read `days`, `date_input`, `m`, `d`, `y`, `c`, `k`, ... — names restored.
    # Day names keyed by Zeller's f value (0 == Sunday).
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Map datetime.weekday() (0 == Monday) onto Zeller's numbering (0 == Sunday).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate — the original accepted any length 1-10, contradicting its own
    # error message and then failing with IndexError; require exactly 10.
    if len(date_input) != 10:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_b = date_input[5]
    # Validate
    if sep_b not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math: January/February count as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])  # century part of the year
    k = int(str(y)[2:])  # year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math against the standard library's calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Build the CLI: a single positional date string.
    parser = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    args = parser.parse_args()
    # NOTE(review): the function in this module is named SCREAMING_SNAKE_CASE_
    # (mangled); the original `zeller` symbol does not exist here.
    SCREAMING_SNAKE_CASE_(args.date_input)
| 38 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( A_ ):
    """Drop fairseq-specific keys from the state dict `A_` in place."""
    # NOTE(review): the list below was mangled to `__magic_name__` while the
    # loop read `ignore_keys`; binding restored.
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # pop with a default so missing keys are tolerated
        A_.pop(k, None)
def a__ ( A_ ):
    """
    Build a bias-free linear output projection that shares its weights with
    the embedding module `A_` (weight tying for the LM head).
    """
    # NOTE(review): the original bound everything to `__magic_name__` and
    # returned the unbound `lin_layer`, and passed the embedding itself as
    # `bias=`; canonical HF helper restored.
    vocab_size, emb_size = A_.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = A_.weight.data
    return lin_layer
def a__ ( A_ ):
    """
    Convert a fairseq XGLM checkpoint at path `A_` into a transformers
    XGLMForCausalLM and return the model.

    NOTE(review): every assignment below targets the same mangled name
    `__magic_name__`, while later lines read `checkpoint`, `args`,
    `state_dict`, `model` — and `remove_ignore_keys_` /
    `make_linear_from_emb` are not defined under those names in this file
    (both were renamed to `a__`). The original bindings appear to have been
    lost in an automated rename; restore them before use.
    """
    # Load the raw fairseq checkpoint on CPU.
    __magic_name__ = torch.load(A_, map_location="""cpu""" )
    # Model hyper-parameters live under cfg["model"] in fairseq checkpoints.
    __magic_name__ = Namespace(**checkpoint["""cfg"""]["""model"""] )
    __magic_name__ = checkpoint["""model"""]
    remove_ignore_keys_(A_ )
    # Vocab size is taken from the embedding matrix, not from the config.
    __magic_name__ = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    # fairseq prefixes weights with "decoder"; transformers expects "model".
    __magic_name__ = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
    __magic_name__ = XGLMConfig(
        vocab_size=A_, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    __magic_name__ = XGLMForCausalLM(A_ )
    # strict=False-style load result is printed so missing/unexpected keys are visible.
    __magic_name__ = model.load_state_dict(A_, strict=A_ )
    print(A_ )
    # Tie the LM head to the input embeddings.
    __magic_name__ = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    # NOTE(review): the converter in this module is named `a__` (mangled);
    # the last definition of `a__` is the checkpoint converter.
    model = a__(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 88 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __A ( __lowerCAmelCase=None , __lowerCAmelCase=None )-> Dict:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=__lowerCAmelCase )
@dataclass
class __lowerCamelCase :
    """CLI arguments controlling how the benchmark-result csv is plotted."""

    # NOTE(review): every field below is bound to the same mangled name
    # `UpperCamelCase__` (only the last survives), the fields carry no type
    # annotations (so @dataclass ignores them entirely), `snake_case__` used
    # as the default is undefined at module scope, and `list_field` was
    # renamed to `__A`. The original field names (csv_file, plot_along_batch,
    # is_time, no_log_scale, is_train, figure_png_file, short_model_names)
    # appear to have been lost in an automated rename — restore before use.
    UpperCamelCase__ = field(
        metadata={"help": "The csv file to plot."} , )
    UpperCamelCase__ = field(
        default=snake_case__ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    UpperCamelCase__ = field(
        default=snake_case__ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    UpperCamelCase__ = field(
        default=snake_case__ , metadata={"help": "Disable logarithmic scale when plotting"} , )
    UpperCamelCase__ = field(
        default=snake_case__ , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    UpperCamelCase__ = field(
        default=snake_case__ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    UpperCamelCase__ = list_field(
        default=snake_case__ , metadata={"help": "List of model names that are used instead of the ones in the csv file."})
def __A ( __lowerCAmelCase )-> List[str]:
"""simple docstring"""
try:
int(__lowerCAmelCase )
return True
except ValueError:
return False
def __A ( __lowerCAmelCase )-> Optional[int]:
"""simple docstring"""
try:
float(__lowerCAmelCase )
return True
except ValueError:
return False
class __lowerCamelCase :
    """Load benchmark results from a csv file and plot them with matplotlib."""

    def __init__( self , UpperCAmelCase ):
        """Parse `args.csv_file` into ``result_dict`` keyed by model name."""
        # NOTE(review): every assignment below targets the mangled name
        # `_UpperCAmelCase`, while later lines read `reader`, `model_name`,
        # `self.args` and `self.result_dict`; `can_convert_to_int`/`_float`
        # were both renamed to `__A`. The original bindings (args,
        # result_dict, reader, model_name, result value) were lost in an
        # automated rename — restore before use.
        _UpperCAmelCase = args
        _UpperCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='' ) as csv_file:
            _UpperCAmelCase = csv.DictReader(UpperCAmelCase )
            for row in reader:
                _UpperCAmelCase = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    _UpperCAmelCase = int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    _UpperCAmelCase = float(row['result'] )

    def UpperCamelCase ( self ):
        """Render one scatter/line series per model and save or show the figure."""
        # NOTE(review): same mangling as __init__ — `fig`/`ax`, `title_str`,
        # the per-model arrays (`batch_sizes`, `sequence_lengths`, `results`,
        # `x_axis_array`, `inner_loop_array`, `label_model_name`,
        # `inner_loop_label`, `x_axis_label`, `y_axis_array`) are read under
        # their original names but assigned to `_UpperCAmelCase`.
        _UpperCAmelCase , _UpperCAmelCase = plt.subplots()
        _UpperCAmelCase = 'Time usage' if self.args.is_time else 'Memory usage'
        _UpperCAmelCase = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            _UpperCAmelCase = sorted(set(self.result_dict[model_name]['bsz'] ) )
            _UpperCAmelCase = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            _UpperCAmelCase = self.result_dict[model_name]['result']
            ((_UpperCAmelCase) , (_UpperCAmelCase)) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            _UpperCAmelCase = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    _UpperCAmelCase = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCAmelCase , )
                else:
                    _UpperCAmelCase = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                ((_UpperCAmelCase) , (_UpperCAmelCase)) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                _UpperCAmelCase = np.asarray(UpperCAmelCase , UpperCAmelCase )[: len(UpperCAmelCase )]
                plt.scatter(
                    UpperCAmelCase , UpperCAmelCase , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
                plt.plot(UpperCAmelCase , UpperCAmelCase , '--' )
                title_str += F""" {label_model_name} vs."""
        _UpperCAmelCase = title_str[:-4]
        _UpperCAmelCase = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(UpperCAmelCase )
        plt.xlabel(UpperCAmelCase )
        plt.ylabel(UpperCAmelCase )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def __A ( )-> List[Any]:
    """Parse the plot arguments from the command line and render the plot."""
    # NOTE(review): `__lowerCAmelCase` is undefined at module scope (the
    # arguments dataclass was renamed to `__lowerCamelCase`), `parser` and
    # `plot` are never bound (every assignment targets `_UpperCAmelCase`),
    # and `Plot` was renamed too — restore the original names before running.
    _UpperCAmelCase = HfArgumentParser(__lowerCAmelCase )
    _UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
    _UpperCAmelCase = Plot(args=__lowerCAmelCase )
    plot.plot()
if __name__ == "__main__":
    # NOTE(review): the entry point in this module is the last definition of
    # `__A` (mangled); the original `main` symbol does not exist here.
    __A()
| 39 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# NOTE(review): the original bound every value to `__lowerCAmelCase` while the
# later lines read `fork_point_sha`, `joined_dirs`, `modified_files`, `regex`
# and `relevant_modified_files`; the intended bindings are restored below.
# SHA of the commit where the current branch forked from main.
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
# Files changed since the fork point (deleted files excluded by --diff-filter=d).
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
# Keep only .py files living under one of the requested top-level directories.
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(RF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 88 | 0 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Tokenizer tests for XGLM (slow sentencepiece + fast), using the local fixture."""

    # NOTE(review): the mixin base `_a` is presumably TokenizerTesterMixin —
    # confirm. The four class attributes were all mangled to `UpperCAmelCase`
    # (only the last survived) and every method local was bound to `a` while
    # read under its original name; canonical names restored throughout.
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        # "<pad>" is id 1 in the fixture vocabulary.
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # Digits/accents outside the fixture vocab come back as "<unk>".
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        # Full pretrained tokenizer used by the @slow integration tests below.
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(get_tests_dir("fixtures/test_sentencepiece.model"), f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 40 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : int=99 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : str=36 , UpperCamelCase__ : List[str]=6 , UpperCamelCase__ : List[str]=6 , UpperCamelCase__ : Union[str, Any]=6 , UpperCamelCase__ : int=37 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=16 , UpperCamelCase__ : int=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Dict=None , ) -> Any:
"""simple docstring"""
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = embedding_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_hidden_groups
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = scope
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ = None
if self.use_input_mask:
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ = None
if self.use_token_type_ids:
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ = None
__magic_name__ = None
__magic_name__ = None
if self.use_labels:
__magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowercase ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
__magic_name__ = AlbertModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
__magic_name__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
__magic_name__ = AlbertForPreTraining(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , sentence_order_label=UpperCamelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Dict:
"""simple docstring"""
__magic_name__ = AlbertForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ) -> List[Any]:
"""simple docstring"""
__magic_name__ = AlbertForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.num_labels
__magic_name__ = AlbertForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ) -> int:
"""simple docstring"""
__magic_name__ = self.num_labels
__magic_name__ = AlbertForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.num_choices
__magic_name__ = AlbertForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) = config_and_inputs
__magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
def _lowercase ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
__magic_name__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ )
__magic_name__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def _lowercase ( self : int ) -> int:
"""simple docstring"""
__magic_name__ = AlbertModelTester(self )
__magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
def _lowercase ( self : List[Any] ) -> Any:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def _lowercase ( self : Dict ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def _lowercase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = AlbertModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = AlbertModel.from_pretrained("""albert-base-v2""" )
__magic_name__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__magic_name__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
__magic_name__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCamelCase__ )
__magic_name__ = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) )
| 88 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Any =logging.get_logger(__name__)
_A : List[str] ={
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _lowercase ( _lowercase ):
a = """decision_transformer"""
a = ["""past_key_values"""]
a = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self: Optional[Any] , UpperCamelCase__: Dict=17 , UpperCamelCase__: int=4 , UpperCamelCase__: Optional[int]=128 , UpperCamelCase__: int=4_096 , UpperCamelCase__: Dict=True , UpperCamelCase__: Optional[Any]=1 , UpperCamelCase__: Any=1_024 , UpperCamelCase__: Dict=3 , UpperCamelCase__: List[str]=1 , UpperCamelCase__: int=None , UpperCamelCase__: Union[str, Any]="relu" , UpperCamelCase__: List[Any]=0.1 , UpperCamelCase__: Dict=0.1 , UpperCamelCase__: int=0.1 , UpperCamelCase__: Optional[int]=1e-5 , UpperCamelCase__: str=0.02 , UpperCamelCase__: Optional[Any]=True , UpperCamelCase__: Any=True , UpperCamelCase__: int=50_256 , UpperCamelCase__: Union[str, Any]=50_256 , UpperCamelCase__: Optional[int]=False , UpperCamelCase__: Union[str, Any]=False , **UpperCamelCase__: List[Any] , ):
lowerCamelCase__ : Optional[Any] = state_dim
lowerCamelCase__ : Optional[int] = act_dim
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : str = max_ep_len
lowerCamelCase__ : Optional[Any] = action_tanh
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Dict = n_positions
lowerCamelCase__ : Union[str, Any] = n_layer
lowerCamelCase__ : List[Any] = n_head
lowerCamelCase__ : int = n_inner
lowerCamelCase__ : str = activation_function
lowerCamelCase__ : str = resid_pdrop
lowerCamelCase__ : Optional[Any] = embd_pdrop
lowerCamelCase__ : Union[str, Any] = attn_pdrop
lowerCamelCase__ : int = layer_norm_epsilon
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : str = scale_attn_weights
lowerCamelCase__ : Tuple = use_cache
lowerCamelCase__ : Any = scale_attn_by_inverse_layer_idx
lowerCamelCase__ : Any = reorder_and_upcast_attn
lowerCamelCase__ : List[Any] = bos_token_id
lowerCamelCase__ : Dict = eos_token_id
super().__init__(bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 41 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : int = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """biogpt"""
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=4_2384 , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : Any=24 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Tuple=4096 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : str=1024 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : List[str]=1E-12 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : List[str]=2 , **UpperCamelCase__ : Optional[int] , ) -> Tuple:
"""simple docstring"""
__magic_name__ = vocab_size
__magic_name__ = max_position_embeddings
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = scale_embedding
__magic_name__ = use_cache
__magic_name__ = layerdrop
__magic_name__ = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
| 88 | 0 |
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 0
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(lowerCAmelCase_ ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(lowerCAmelCase_ ) , 0 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoConfig.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
# Check that tokenizer_type ≠ model_type
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , config=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowerCAmelCase_ , 'vocab.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='bert' , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowerCAmelCase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowerCAmelCase_ , 'merges.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='gpt2' , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(lowerCAmelCase_ , 'vocab.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='bert' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(lowerCAmelCase_ , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(lowerCAmelCase_ , 'merges.txt' ) )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , tokenizer_type='gpt2' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase_ ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_snake_case = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowerCAmelCase_ )
else:
self.assertEqual(tokenizer.do_lower_case , lowerCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowerCAmelCase_ , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
_snake_case = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = TOKENIZER_MAPPING.values()
_snake_case = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' ) , lowerCAmelCase_ )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=lowerCAmelCase_ )
_snake_case = 'Hello, world. How are you?'
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual('[UNK]' , tokens[0] )
_snake_case = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=lowerCAmelCase_ )
_snake_case = tokenizer.tokenize(lowerCAmelCase_ )
self.assertEqual('[UNK]' , tokens[0] )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config' )
self.assertEqual(type(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , '[UNK]' )
self.assertEqual(tokenizer.padding_side , 'right' )
self.assertEqual(tokenizer.truncation_side , 'right' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('ctrl' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = get_tokenizer_config('bert-base-cased' )
_snake_case = config.pop('_commit_hash' , lowerCAmelCase_ )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowerCAmelCase_ , {'do_lower_case': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_snake_case = get_tokenizer_config(lowerCAmelCase_ )
self.assertDictEqual(lowerCAmelCase_ , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = get_tokenizer_config(lowerCAmelCase_ )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer' )
def lowerCamelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
_snake_case = CustomTokenizer.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
# Can register in two steps
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase_ ):
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case = BertTokenizerFast.from_pretrained(lowerCAmelCase_ )
bert_tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = CustomTokenizerFast.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase_ )
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer' )
@require_tokenizers
def lowerCamelCase ( self ):
"""simple docstring"""
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = False
class __UpperCAmelCase ( _lowerCamelCase ):
__lowercase = NewTokenizer
__lowercase = False
try:
AutoConfig.register('custom' , lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , slow_tokenizer_class=lowerCAmelCase_ )
AutoTokenizer.register(lowerCAmelCase_ , fast_tokenizer_class=lowerCAmelCase_ )
# If remote code is not set, the default is to use local
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertFalse(tokenizer.special_attribute_present )
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
self.assertTrue(tokenizer.special_attribute_present )
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
_snake_case = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
def lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier' ):
_snake_case = AutoTokenizer.from_pretrained('bert-base' )
def lowerCamelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_snake_case = AutoTokenizer.from_pretrained(lowerCAmelCase_ , revision='aaaaaa' )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
with RequestCounter() as counter:
_snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 42 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
# Module-level logger for this module's extraction machinery.
# NOTE: the original ": Any" annotation is removed — `Any` is never imported
# here (the typing import above brings only Dict/List/Optional/Type/Union),
# and module-level annotations are evaluated at runtime (PEP 526), so the
# annotated form raised NameError on import.
__lowerCAmelCase = get_logger(__name__)
class UpperCAmelCase_ :
    """Coordinates extraction of compressed/archive files into a cache directory.

    Format detection and the actual decompression are delegated to ``Extractor``;
    this class only decides where extracted output lives and whether extraction
    can be skipped because a previous run already produced it.

    Fixes applied in review:
      * ``__init__`` previously bound its results to throwaway locals instead of
        ``self.extract_dir`` / ``self.extractor``, so every other method raised
        AttributeError.
      * The two helper methods are restored to the names ``_get_output_path`` /
        ``_do_extract`` that the extraction method actually calls (all three had
        been renamed to the same name, so the first two were shadowed and the
        call sites dangled).
      * Duplicate parameter names (a SyntaxError) are replaced with distinct,
        meaningful ones. The externally visible method keeps its original name
        and signature shape for caller compatibility.
    """

    def __init__( self , cache_dir : Optional[str] = None ) -> None:
        # Root directory for extracted archives; falls back to the library-wide
        # default path when no explicit cache directory is supplied.
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path( self , path : str ) -> str:
        """Return a deterministic extraction target path derived from ``path``."""
        from .file_utils import hash_url_to_filename

        # We extract inside the cache dir; the target name is a hash of the
        # absolute input path so repeated calls map to the same location.
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )

    def _do_extract( self , output_path : str , force_extract : bool ) -> bool:
        """Return True when extraction should run: forced, or no prior output
        (neither a file nor a non-empty directory) exists at ``output_path``."""
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )

    def _lowercase ( self , input_path : str , force_extract : bool = False ) -> str:
        """Extract ``input_path`` if it is a recognized archive and return the
        extracted location; return ``input_path`` unchanged otherwise.

        Name kept as ``_lowercase`` for compatibility with this file's callers.
        """
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            # Not a recognized archive: nothing to do.
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class UpperCAmelCase_ ( _A ):
    """Abstract interface every archive extractor must implement.

    Fix: both abstract methods were named ``_lowercase``, so the second
    definition silently shadowed the first; they now carry the names the
    dispatcher actually calls (``is_extractable`` / ``extract``).
    """

    @classmethod
    @abstractmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) -> bool:
        """Return True when ``path`` looks like an archive this extractor handles."""
        ...

    @staticmethod
    @abstractmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Extract ``input_path`` into ``output_path``."""
        ...

    # Backward-compatible alias for the previous (shadowed) public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):  # duplicate base `(_A, _A)` was a TypeError at class creation
    """Extractor base that recognizes archives by their leading magic-number bytes."""

    # Subclasses list the byte signatures their format may start with.
    a__ = []
    magic_numbers = a__  # name the detection logic below actually reads

    @staticmethod
    def read_magic_number( path : Union[Path, str] , magic_number_length : int ) -> bytes:
        """Read the first ``magic_number_length`` bytes of ``path``."""
        with open(path , """rb""" ) as f:
            return f.read(magic_number_length )

    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) -> bool:
        """Check whether ``path`` starts with one of the class's magic numbers.

        If no ``magic_number`` is supplied, read just enough bytes to compare
        against the longest known signature; unreadable files are not extractable.
        """
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )

    # Backward-compatible alias for the previous (shadowed) public name.
    _lowercase = is_extractable
class UpperCAmelCase_ ( _A ):
    """Extractor for tar archives, with protection against path-traversal members."""

    a__ = []  # tar has no magic number; detection is delegated to `tarfile`
    magic_numbers = a__

    @classmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) -> bool:
        return tarfile.is_tarfile(path )

    @staticmethod
    def safemembers( members , output_path ):
        """Yield only archive members that resolve inside ``output_path``.

        Blocks absolute/`..` paths and symlinks/hardlinks escaping the target
        directory (logged and skipped rather than raising).
        """
        def resolved(path : str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )

        def badpath(path : str , base : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )

        def badlink(info , base : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )

        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
            else:
                yield finfo

    @classmethod
    def extract( cls , input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        # classmethod so the safemembers filter is reached via `cls`
        # (the previous hard-coded class name did not resolve).
        tar_file.extractall(output_path , members=cls.safemembers(tar_file , output_path ) )
        tar_file.close()

    # Backward-compatible alias for the previous (shadowed) public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for gzip-compressed single files."""

    a__ = [B"""\x1F\x8B"""]
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Decompress ``input_path`` into ``output_path`` (streaming copy)."""
        with gzip.open(input_path , """rb""" ) as gzip_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for zip archives."""

    # PK signatures: regular, empty and spanned archives.
    a__ = [
        B"""PK\x03\x04""",
        B"""PK\x05\x06""",  # empty archive
        B"""PK\x07\x08""",  # spanned archive
    ]
    magic_numbers = a__  # name the magic-number detection logic reads

    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) -> bool:
        """Magic-number check, then a stricter end-of-central-directory scan."""
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path , """rb""" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        os.makedirs(output_path , exist_ok=True )
        # `with` closes the archive; the previous explicit close() was redundant.
        with zipfile.ZipFile(input_path , """r""" ) as zip_file:
            zip_file.extractall(output_path )

    # Backward-compatible alias for the previous (shadowed) public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for xz (LZMA) compressed single files."""

    a__ = [B"""\xFD\x37\x7A\x58\x5A\x00"""]
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        """Decompress ``input_path`` into ``output_path`` (streaming copy)."""
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for RAR archives (requires the optional `rarfile` package)."""

    a__ = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""]  # RAR_ID # RAR5_ID
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile

        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for zstandard-compressed single files (optional `zstandard` package)."""

    a__ = [B"""\x28\xb5\x2F\xFD"""]
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for bzip2-compressed single files."""

    a__ = [B"""\x42\x5A\x68"""]
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        # The module-level `import bza` names a nonexistent module; the
        # standard-library bzip2 module is `bz2`, imported locally here.
        import bz2

        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for 7z archives (optional `py7zr` package)."""

    a__ = [B"""\x37\x7A\xBC\xAF\x27\x1C"""]
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr  # fixed module name (was the nonexistent `pyazr`)

        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ ( _A ):
    """Extractor for lz4-frame compressed single files (optional `lz4` package)."""

    a__ = [B"""\x04\x22\x4D\x18"""]
    magic_numbers = a__  # name the magic-number detection logic reads

    @staticmethod
    def extract( input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame  # fixed module name (was the nonexistent `lza.frame`)

        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )

    # Backward-compatible alias for the previous public name.
    _lowercase = extract
class UpperCAmelCase_ :
    """Facade that dispatches extraction to the extractor matching an archive's format.

    Fixes vs. previous version: all five methods were named ``_lowercase`` and
    shadowed each other while calling the real names; the deprecation warnings
    passed a path as ``category=``; the attribute read as ``cls.extractors`` was
    only defined as ``a__``.
    """

    # NOTE(review): these values must be the names of the extractor classes
    # defined above — confirm those class definitions actually carry these names.
    a__ = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }
    extractors = a__  # name the methods below actually read

    @classmethod
    def _get_magic_number_max_length( cls ) -> int:
        """Longest magic number across all extractors that declare one."""
        return max(
            (
                len(extractor_magic_number )
                for extractor in cls.extractors.values()
                for extractor_magic_number in getattr(extractor , "magic_numbers" , [] )
            ) ,
            default=0 , )

    @staticmethod
    def _read_magic_number( path : Union[Path, str] , magic_number_length : int ) -> bytes:
        """Best-effort read of the file's leading bytes (empty on I/O errors)."""
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""

    @classmethod
    def is_extractable( cls , path : Union[Path, str] , return_extractor : bool = False ) -> bool:
        """Deprecated: use `infer_extractor_format` instead."""
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format( cls , path : Union[Path, str] ) -> str:  # <Added version="2.4.0"/>
        """Return the format key of the first extractor recognizing ``path`` (None if none)."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format

    @classmethod
    def extract( cls , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor = "deprecated" , ) -> None:
        """Extract ``input_path`` into ``output_path`` using ``extractor_format``.

        Passing ``extractor`` (or omitting ``extractor_format``) is deprecated.
        """
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=FutureWarning , )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )

    # Backward-compatible alias for the previous (shadowed) public name.
    _lowercase = extract
| 88 | 0 |
def get_data( source_data: list[list[float]] ) -> list[list[float]]:
    """Transpose rows of ``source_data`` into per-column lists of floats.

    Named ``get_data`` because the driver function below calls it by that name;
    also fixes the inner loop, which enumerated/len'd/floated the whole input
    instead of the current row, element and accumulator.
    """
    data_lists: list[list[float]] = []
    for row in source_data:
        for i, el in enumerate(row ):
            # Grow the column list lazily the first time a column index appears.
            if len(data_lists ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(el ) )
    return data_lists
def calculate_each_score( data_lists: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    """Min-max normalize each column, inverted when its weight is 0.

    Named ``calculate_each_score`` because the driver function below calls it by
    that name; the previous definition also declared two parameters with the
    same name, which is a SyntaxError.

    Raises:
        ValueError: if any weight is not 0 or 1.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists , weights ):
        mind = min(dlist )
        maxd = max(dlist )
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    # Constant column: every value is simultaneously best.
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg )
        score_lists.append(score )
    return score_lists
def generate_final_scores( score_lists: list[list[float]] ) -> list[float]:
    """Sum the per-column score lists element-wise into one score per row.

    Named ``generate_final_scores`` because the driver function below calls it
    by that name; the previous body enumerated the outer list instead of the
    current inner list. An empty input now yields [] instead of an IndexError.
    """
    if not score_lists:
        return []
    final_scores: list[float] = [0 for _ in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(slist ):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def lowerCamelCase ( source_data: list[list[float]] , weights: list[int] ) -> list[list[float]]:
    """Percentual-proximity scoring: append a composite score to each data row.

    ``weights[i]`` is 0 when a smaller value in column ``i`` is better, 1 when a
    larger value is better. Mutates and returns ``source_data``.

    The previous definition declared two parameters with the same name (a
    SyntaxError) and appended the whole input instead of each row's score.
    """
    data_lists = get_data(source_data )
    score_lists = calculate_each_score(data_lists , weights )
    final_scores = generate_final_scores(score_lists )
    # append scores to source data
    for i, ele in enumerate(final_scores ):
        source_data[i].append(ele )
    return source_data
| 43 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# (Previously bound to a throwaway name while `_import_structure` was
# referenced below, which raised a NameError at import time.)
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

# The modeling module is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Install the lazy proxy module (the previous code discarded it in a local).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __A ( TestCase ):  # base was an undefined name; TestCase is imported above
    """Tests for RagRetriever over canonical, custom and legacy FAISS indexes.

    Fixes vs. previous version: every method was named ``__A`` (so only the
    last survived) while helpers were invoked by their real names; ``setUp``
    stored fixture paths in dead locals instead of instance attributes; the
    ``load_dataset`` patch never set ``mock_load_dataset.return_value``.
    """

    def setUp(self):
        """Create temp DPR and BART tokenizer fixtures on disk."""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        """Two-row dataset with a flat FAISS inner-product index over embeddings."""
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever

    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True )  # check for doc token related keys in dictionary.
| 44 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# (Previously bound to a throwaway name while `_import_structure` was
# referenced below, which raised a NameError at import time.)
_import_structure = {
    'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
    'tokenization_xlm': ['XLMTokenizer'],
}

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlm'] = [
        'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLMForMultipleChoice',
        'XLMForQuestionAnswering',
        'XLMForQuestionAnsweringSimple',
        'XLMForSequenceClassification',
        'XLMForTokenClassification',
        'XLMModel',
        'XLMPreTrainedModel',
        'XLMWithLMHeadModel',
    ]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlm'] = [
        'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLMForMultipleChoice',
        'TFXLMForQuestionAnsweringSimple',
        'TFXLMForSequenceClassification',
        'TFXLMForTokenClassification',
        'TFXLMMainLayer',
        'TFXLMModel',
        'TFXLMPreTrainedModel',
        'TFXLMWithLMHeadModel',
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Install the lazy proxy module (the previous code discarded it in a local).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq wav2vec2 parameter-name prefixes to their HF Wav2Vec2
# counterparts. A "*" in the value is replaced with the encoder layer index
# when the weights are copied (see the loading helpers below).
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# Keys that live at the top level of the HF model (no extra module nesting).
# NOTE(review): not referenced in this chunk — presumably used by the full
# conversion script; confirm before removing.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor of the resolved module receives the
    data ("weight", "weight_g", "weight_v" or "bias"); when it is None, the
    resolved object itself is assigned. `full_name` is only used for the
    shape-mismatch message and the log line.
    """
    # Walk the dotted path down to the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Copy wav2vec2 encoder weights from `fairseq_model` into `hf_model`.

    Returns the fairseq encoder->decoder projection module's weight (the
    ``proj`` entry) when present, else None. Weights that match no known
    mapping are collected and reported via ``logger.warning``.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    hf_feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # Convolutional feature-extractor weights have their own layout.
            load_conv_layer(
                name,
                value,
                hf_feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor tensor into the HF extractor.

    fairseq names look like ``...conv_layers.<layer_id>.<type_id>.<param>``;
    type_id 0 is the conv itself, type_id 2 the layer norm (only layer 0 when
    group norm is used). Anything else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Return a bias-free ``nn.Linear`` whose weight is tied to `emb`'s weight.

    Used to turn an embedding matrix into an output projection (LM head)
    sharing the same parameters.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the underlying tensor rather than copying it.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a HF vocab dict from a fairseq dictionary file.

    Each line of the file is "<token> <count>"; tokens receive ids starting at
    4, after the four reserved special tokens.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        # Only the token (first whitespace-separated field) matters.
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2 seq2seq checkpoint to a HF
    SpeechEncoderDecoderModel and save model, tokenizer and feature extractor
    to `pytorch_dump_folder_path`.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    # fairseq needs the directory of the dict file as its "data" argument.
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Propagate tokenizer/processor info into the combined config before saving.
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument(
        "--encoder_config_path",
        default="facebook/wav2vec2-large-lv60",
        type=str,
        help="Path to hf encoder wav2vec2 checkpoint config",
    )
    parser.add_argument(
        "--decoder_config_path",
        default="facebook/s2t-small-mustc-en-fr-st",
        type=str,
        help="Path to hf decoder s2t checkpoint config",
    )
    parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
    parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()

    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 45 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase_(unittest.TestCase):
    """Pipeline tests for text2text-generation (encoder-decoder models)."""

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # Build the pipeline under test plus example inputs for the shared runner.
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("""Something there""")
        self.assertEqual(outputs, [{"""generated_text""": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there"""))

        outputs = generator(["""This is great !""", """Something else"""], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )

        outputs = generator(
            ["""This is great !""", """Something else"""], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
                [{"""generated_text""": ANY(str)}, {"""generated_text""": ANY(str)}],
            ],
        )

        # Non-string input must be rejected.
        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""pt""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])

        num_return_sequences = 3
        outputs = generator(
            """Something there""",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("""This is a test""", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"""generated_token_ids""": ANY(torch.Tensor)},
                {"""generated_token_ids""": ANY(torch.Tensor)},
            ],
        )

        # Give the tokenizer a pad token so batched generation works.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                    {"""generated_token_ids""": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""tf""")
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""", do_sample=False)
        self.assertEqual(outputs, [{"""generated_text""": """"""}])
| 88 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowercase :
def __init__( self , lowercase , lowercase=99 , lowercase=13 , lowercase=7 , lowercase=9 , lowercase=True , lowercase=True , lowercase=False , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase=8 , lowercase=0.1 , lowercase=0.002 , lowercase=1 , lowercase=0 , lowercase=0 , lowercase=None , lowercase=None , ) -> Optional[Any]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = encoder_seq_length
lowerCAmelCase = decoder_seq_length
# For common tests
lowerCAmelCase = self.decoder_seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_attention_mask
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = d_ff
lowerCAmelCase = relative_attention_num_buckets
lowerCAmelCase = dropout_rate
lowerCAmelCase = initializer_factor
lowerCAmelCase = eos_token_id
lowerCAmelCase = pad_token_id
lowerCAmelCase = decoder_start_token_id
lowerCAmelCase = None
lowerCAmelCase = decoder_layers
def _snake_case ( self ) -> str:
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None , ) -> Optional[Any]:
if attention_mask is None:
lowerCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase )
if decoder_head_mask is None:
lowerCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase )
if cross_attn_head_mask is None:
lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowercase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self ) -> int:
lowerCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase = self.get_config()
lowerCAmelCase = config.num_attention_heads
lowerCAmelCase = self.prepare_inputs_dict(lowercase , lowercase , lowercase )
return config, input_dict
def _snake_case ( self ) -> int:
lowerCAmelCase , lowerCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self ) -> List[str]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self ) -> Optional[Any]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> str:
lowerCAmelCase = UMTaModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCAmelCase = model(
input_ids=lowercase , decoder_input_ids=lowercase , attention_mask=lowercase , decoder_attention_mask=lowercase , )
lowerCAmelCase = model(input_ids=lowercase , decoder_input_ids=lowercase )
lowerCAmelCase = result.last_hidden_state
lowerCAmelCase = result.past_key_values
lowerCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowercase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Optional[Any]:
lowerCAmelCase = UMTaModel(config=lowercase ).get_decoder().to(lowercase ).eval()
# first forward pass
lowerCAmelCase = model(lowercase , use_cache=lowercase )
lowerCAmelCase = model(lowercase )
lowerCAmelCase = model(lowercase , use_cache=lowercase )
self.parent.assertTrue(len(lowercase ) == len(lowercase ) )
self.parent.assertTrue(len(lowercase ) == len(lowercase ) + 1 )
lowerCAmelCase , lowerCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = model(lowercase )["""last_hidden_state"""]
lowerCAmelCase = model(lowercase , past_key_values=lowercase )["""last_hidden_state"""]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def _snake_case ( self , lowercase , lowercase , ) -> str:
lowerCAmelCase = UMTaModel(config=lowercase ).to(lowercase ).half().eval()
lowerCAmelCase = model(**lowercase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(lowercase ).any().item() )
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_SCREAMING_SNAKE_CASE = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_SCREAMING_SNAKE_CASE = [0.8, 0.9]
def _snake_case ( self ) -> str:
lowerCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = UMTaModel(config_and_inputs[0] ).to(lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowercase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=lowercase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase = config_and_inputs[0]
lowerCAmelCase = UMTaForConditionalGeneration(lowercase ).eval()
model.to(lowercase )
lowerCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=lowercase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase ),
}
for attn_name, (name, mask) in zip(lowercase , head_masking.items() ):
lowerCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowercase )
lowerCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=lowercase , return_dict_in_generate=lowercase , **lowercase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self ) -> Union[str, Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=lowercase ).to(lowercase )
lowerCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=lowercase , legacy=lowercase )
lowerCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowerCAmelCase = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase ).input_ids
# fmt: off
lowerCAmelCase = torch.tensor(
[
[ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowercase , lowercase )
lowerCAmelCase = model.generate(input_ids.to(lowercase ) )
lowerCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowerCAmelCase = tokenizer.batch_decode(lowercase )
self.assertEqual(lowercase , lowercase )
| 46 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Above MAX_GPU_BATCH_SIZE the training loop falls back to gradient
# accumulation (see training_function below).
MAX_GPU_BATCH_SIZE = 16
# NOTE(review): 32 presumably serves as the evaluation batch size in the full
# script — confirm against the remainder of the file.
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build train/validation/test dataloaders for one cross-validation fold.

    Args:
        accelerator: the main `Accelerator` object (drives padding/precision
            decisions and the main-process-first barrier).
        dataset: dataset dict with "train" and "validation" splits.
        train_idxs: row indices of the fold's training subset.
        valid_idxs: row indices of the fold's validation subset.
        batch_size: training batch size.

    Returns:
        (train_dataloader, eval_dataloader, test_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs),
            """validation""": dataset["""train"""].select(valid_idxs),
            """test""": dataset["""validation"""],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="""longest""",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="""pt""",
        )

    # Instantiate dataloaders. Only training shuffles; eval/test use the
    # fixed evaluation batch size.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """
    Run k-fold cross-validated fine-tuning of BERT on GLUE/MRPC with 🤗 Accelerate.

    Args:
        config: dict with keys "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace providing `cpu`, `mixed_precision` and `num_folds`.
    """
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    """Entry point: parse command-line arguments and launch cross-validated training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    # Default hyper-parameters for the MRPC fine-tuning run.
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 88 | 0 |
'''simple docstring'''
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    The calendar is advanced a week at a time; `day` is the day-of-month of the
    current Sunday, rolled over month by month (with leap-year handling).

    Returns:
        The number of first-of-month Sundays (171).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # day-of-month of the first Sunday tracked from the 1900 anchor
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # jump to the next Sunday

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 47 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if `series` has a constant difference between consecutive terms.

    A single-element series is considered arithmetic.

    Raises:
        ValueError: if `series` is not a list, or is empty.
    """
    # The mangled original called isinstance(x, x), which raises TypeError;
    # the intended check is against `list`.
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def mean(series: list) -> float:
    """Return the arithmetic mean of the numbers in `series`.

    Raises:
        ValueError: if `series` is not a list, or is empty.
    """
    # The mangled original called isinstance(x, x), which raises TypeError;
    # the intended check is against `list`.
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # Built-in sum() replaces the manual accumulation loop.
    return sum(series) / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    """Builds small ViT configs/inputs and shape-checks the Flax models.

    The mangled original repeated one name for every __init__ parameter
    (a SyntaxError) and left `FlaxViTModelTester` — referenced by the test
    class below — undefined; identifiers are restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a small ViTConfig plus random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        """Check the base model's output sequence shape."""
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        """Check the classification head's logits shape, including greyscale input."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs to the common-tester interface."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowercase ( self ) -> None:
lowerCamelCase : Any = FlaxViTModelTester(self )
lowerCamelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> int:
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self ) -> Dict:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def _lowercase ( self ) -> Dict:
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Dict = model_class(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Tuple = [*signature.parameters.keys()]
lowerCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ) -> Dict:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase : Dict = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = model_class(UpperCamelCase__ )
@jax.jit
def model_jitted(UpperCamelCase__ , **UpperCamelCase__ ):
return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__ )
with self.subTest("JIT Enabled" ):
lowerCamelCase : Optional[Any] = model_jitted(**UpperCamelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCamelCase : List[Any] = model_jitted(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
lowerCamelCase : str = model_class_name.from_pretrained("google/vit-base-patch16-224" )
lowerCamelCase : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCamelCase__ )
| 48 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of a VAE decoding method.

    Attributes:
        sample: the decoded output sample from the last layer of the model.
            NOTE(review): the mangled original replaced this annotated field
            with a bare `a__ = 42`, which is not a dataclass field at all.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    """VAE encoder: conv-in, a stack of down blocks, a mid block, then the
    output projection (doubled channels when `double_z` for mean/logvar).

    The mangled original repeated one parameter name throughout __init__
    (a SyntaxError) and called the nonexistent `torch.nn.Convad`;
    identifiers are restored (`Conv2d`).
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        # NOTE(review): `UNetMidBlockaD` is the (mangled) name exported by the
        # local import above — presumably `UNetMidBlock2D`; confirm the module.
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode `x`; uses activation checkpointing when training with
        `gradient_checkpointing` enabled."""
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    """VAE decoder: conv-in, mid block, a stack of up blocks, then the
    output projection. Supports "group" or "spatial" normalization.

    The mangled original repeated one parameter name throughout __init__
    (a SyntaxError) and used the nonexistent `nn.Convad`; identifiers are
    restored (`Conv2d`).
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # spatial norm conditions on the latent embedding channels
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        # NOTE(review): `UNetMidBlockaD` is the (mangled) name exported by the
        # local import above — presumably `UNetMidBlock2D`; confirm the module.
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latent `z`; `latent_embeds` conditions SpatialNorm layers
        when `norm_type == "spatial"`. Uses activation checkpointing when
        training with `gradient_checkpointing` enabled."""
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps each spatial latent vector
    to its nearest codebook entry.

    The mangled original named all four methods `_lowercase` (so only the
    last definition survived and there was no `forward` at all) and collapsed
    every local; identifiers are restored.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices to the remapped (used-only) index space."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of `remap_to_used`: back to the full codebook index space."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        """Quantize `z` (NCHW); returns (z_q, loss, (perplexity, encodings, indices))."""
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for `indices`; `shape` is (batch, h, w, channel)."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    """Gaussian with diagonal covariance, parameterized by a tensor whose
    channel dimension concatenates [mean, logvar].

    The mangled original inherited from an undefined `_A` and collapsed
    `sample`/`kl`/`nll`/`mode` to one name; identifiers are restored.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # split [mean | logvar] along the channel dimension
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # degenerate distribution: zero spread, sampling returns the mean
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        """Draw a reparameterized sample (mean + std * eps) on the parameters' device/dtype."""
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to a standard normal (other=None) or to another diagonal Gaussian."""
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        """Negative log-likelihood of `sample` under this Gaussian, summed over `dims`.
        (`dims` default kept as a list for interface compatibility; it is never mutated.)"""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """The distribution's mode, i.e. its mean."""
        return self.mean
| 88 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__snake_case :Union[str, Any] = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
__snake_case :str = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
__snake_case :List[str] = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    """`datasets.Metric` wrapper around the official MAUVE implementation.

    The mangled original named both overrides `_lowerCamelCase`; the
    `datasets.Metric` contract requires `_info` and `_compute`, restored here.
    """

    def _info(self):
        # Metadata consumed by the datasets library (features schema, docs, links).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        # Delegate the whole computation to the reference implementation;
        # predictions/references are forwarded as p_text/q_text.
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 49 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_:
    """Builds tiny MaskFormerSwin configs and random inputs for the test suite.

    The original __init__ bound every argument to a throwaway local instead of
    `self.*`, all method signatures repeated one parameter name (a SyntaxError),
    and bodies read names (`result`, `config`, ...) that were never bound. The
    method names below are the ones the sibling test classes actually call
    (e.g. `prepare_config_and_inputs_for_common`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        # Mutable list defaults are kept for signature compatibility with the
        # other model testers in this test suite (they are never mutated here).
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a MaskFormerSwinConfig from the tester hyper-parameters."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # was `path_norm=` — a misspelled kwarg silently swallowed by the
            # config's **kwargs, leaving patch_norm at its default
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward a batch through MaskFormerSwinModel and check output shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward through MaskFormerSwinBackbone; check feature maps/channels."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError is raised for an unknown out_features entry
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    '''Common model/pipeline test suite for MaskFormerSwin (mixin-driven).

    NOTE(review): this class appears machine-mangled and cannot run as-is:
    every test method is named `_lowercase` (later defs shadow earlier ones and
    unittest discovers none of them); several signatures repeat the parameter
    name `UpperCamelCase__`, which is a SyntaxError (duplicate argument); all
    class attributes are bound to the same name `a__`, clobbering each other
    (the mixins expect `all_model_classes`, `pipeline_model_mapping` and
    boolean `test_*` switches); locals are assigned to `__magic_name__` but
    read back under their original names (`config`, `inputs_dict`, `model`,
    `outputs`, ...); and `MaskFormerSwinModelTester` is not defined in this
    module. Restore the original identifiers before relying on this suite.
    '''
    # NOTE(review): each of the following assignments overwrites the previous
    # `a__`; only the last `False` survives at class level.
    a__ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    a__ = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    def _lowercase ( self : Any ) -> List[str]:
        """setUp-style helper: builds a model tester and a ConfigTester.

        NOTE(review): both results are dropped into a throwaway local instead
        of `self.model_tester` / `self.config_tester`, and `UpperCamelCase__`
        is undefined here (presumably MaskFormerSwinConfig).
        """
        __magic_name__ = MaskFormerSwinModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=37 )
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def _lowercase ( self : List[str] ) -> Optional[int]:
        """Skipped: the extra spatial-dimensions output breaks nn.DataParallel."""
        pass
    def _lowercase ( self : str ) -> Dict:
        """Exercise the ConfigTester round-trip/serialization helpers."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _lowercase ( self : Optional[int] ) -> List[str]:
        """Deliberate no-op: common config properties are not checked for this model."""
        return
    def _lowercase ( self : str ) -> str:
        """Run the model forward-shape check via the model tester."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def _lowercase ( self : int ) -> Optional[Any]:
        """Run the backbone feature-map check via the model tester."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
    @unittest.skip("""Swin does not use inputs_embeds""" )
    def _lowercase ( self : Any ) -> int:
        """Skipped: Swin consumes pixel_values, not inputs_embeds."""
        pass
    @unittest.skip("""Swin does not support feedforward chunking""" )
    def _lowercase ( self : str ) -> List[Any]:
        """Skipped: feed-forward chunking is unsupported for Swin."""
        pass
    def _lowercase ( self : Union[str, Any] ) -> Dict:
        """Check input embeddings are an nn.Module and output embeddings are absent/Linear."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __magic_name__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
    def _lowercase ( self : Tuple ) -> Dict:
        """Check the first positional argument of forward() is `pixel_values`."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            __magic_name__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __magic_name__ = [*signature.parameters.keys()]
            __magic_name__ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def _lowercase ( self : Tuple ) -> int:
        """Skipped: output_attentions is unsupported for this backbone."""
        pass
    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def _lowercase ( self : List[str] ) -> Dict:
        """Skipped: MaskFormerSwin is internal-only."""
        pass
    # NOTE(review): the next signature repeats `UpperCamelCase__` four times —
    # duplicate function arguments are a SyntaxError in Python.
    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ) -> Any:
        """Helper: run a model and verify hidden-state count and patch-grid shape."""
        __magic_name__ = model_class(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        with torch.no_grad():
            __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
        __magic_name__ = outputs.hidden_states
        __magic_name__ = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
        # Swin has a different seq_length
        __magic_name__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def _lowercase ( self : Dict ) -> Dict:
        """Check hidden states via kwarg and via config flag for each model class."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
        """Same hidden-states check with the image padded up to a patch multiple."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = 3
        __magic_name__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __magic_name__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # pad height/width so they are divisible by the patch size
        __magic_name__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __magic_name__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __magic_name__ = True
            self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def _lowercase ( self : Optional[int] ) -> int:
        """Skipped: no public pretrained checkpoints exist for this backbone."""
        pass
    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : List[str] ) -> List[Any]:
        """Skipped pending the native-Swin migration."""
        pass
    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def _lowercase ( self : Dict ) -> Optional[Any]:
        """Skipped pending the native-Swin migration."""
        pass
    def _lowercase ( self : Dict ) -> Any:
        """Check tuple and dict (return_dict) outputs are numerically identical."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(UpperCamelCase__ : Union[str, Any] ):
            # NOTE(review): assigns a throwaway local and returns undefined `t`;
            # originally this zeroed NaNs in-place before comparison.
            __magic_name__ = 0
            return t
        # NOTE(review): duplicate `UpperCamelCase__` parameters below are a
        # SyntaxError; originally (model, tuple_inputs, dict_inputs, kwargs={}).
        def check_equivalence(UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int={} ):
            with torch.no_grad():
                __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
                __magic_name__ = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
            def recursive_check(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ):
                if isinstance(UpperCamelCase__ , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(UpperCamelCase__ , UpperCamelCase__ )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1E-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                            F''' {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has'''
                            F''' `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}.'''
                        ) , )
            recursive_check(UpperCamelCase__ , UpperCamelCase__ )
        for model_class in self.all_model_classes:
            __magic_name__ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"""output_hidden_states""": True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , _A ):
    """Backbone-contract tests for MaskFormerSwinBackbone (via BackboneTesterMixin).

    The original bound both class attributes to the same name `a__` (clobbering
    the first) and discarded every local into `__magic_name__` while reading the
    undefined names back; the mixin-expected attribute names (`self.all_model_classes`
    is read below) and the intended locals are restored here.
    """

    # BackboneTesterMixin reads these exact attribute names.
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self) -> None:
        # The mixin drives everything through `self.model_tester`.
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self) -> None:
        """Check feature maps, hidden states and attentions for every backbone class."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 88 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Integration test for the google/mt5-small checkpoint."""

    @slow
    def test_small_integration_test(self):
        """Check the PyTorch port reproduces the reference log-likelihood.

        The original body bound every value to the same throwaway local and
        then read undefined names (`tokenizer`, `labels`, `loss`, ...), and
        passed the undefined `UpperCAmelCase` where `True`/`torch_device`
        belonged; the method was also named `A_`, so unittest never ran it.
        """
        model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
        labels = tokenizer('Hi I am', return_tensors='pt').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # Convert mean cross-entropy back to a total log-likelihood score.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 50 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCAmelCase_:
    """A binary-tree node: an integer payload plus optional left/right children."""

    def __init__(self, value: int) -> None:
        # The original bound all three values to one throwaway local, so
        # instances were created with no attributes at all.
        self.value = value
        self.left = None
        self.right = None
class UpperCAmelCase_:
    """Wraps a binary tree and iterates over it, yielding one value: the sum
    of every node's ``value`` in the tree."""

    def __init__(self, tree) -> None:
        # Keep the root node; the original dropped it into a throwaway local,
        # leaving `self.tree` unset.
        self.tree = tree

    def depth_first_search(self, node) -> int:
        """Return the sum of all values in the subtree rooted at ``node``.

        An empty subtree (``node is None``) contributes 0. The original defined
        this method under the name ``_lowercase`` while recursing through
        ``self.depth_first_search``, which could never resolve.
        """
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        """Yield the tree's total value exactly once."""
        yield self.depth_first_search(self.tree)
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 88 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 51 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> public symbols it provides. Optional
# backends (tokenizers / torch / tf) only contribute their entries when the
# dependency is importable.
#
# The original bound this dict and the optional symbol lists to throwaway
# variables, so `_import_structure` (referenced at the bottom) was undefined,
# and it assigned the `_LazyModule` to a variable instead of installing it in
# `sys.modules[__name__]`, leaving `import sys` dead and the module eager.
_import_structure = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_funnel'] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_funnel'] = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: submodule name -> public symbols it provides.
# NOTE: the original bound this dict (and the optional model list) to
# throwaway `__lowerCamelCase` variables, so the `_import_structure`
# referenced at the bottom was undefined, and the `_LazyModule` was assigned
# to a variable instead of being installed into `sys.modules[__name__]`.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # Install a lazy proxy module so the heavy torch imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Smoke tests for the TensorFlow benchmark utilities.

    Each test drives TensorFlowBenchmark over a tiny checkpoint and asserts
    that timing/memory results (or CSV/log artifacts) are produced. The
    original bound every local to `__magic_name__` and then read undefined
    names (`MODEL_ID`, `results`, `benchmark`, `UpperCamelCase__`), and named
    every method `_lowercase`, so unittest never discovered any of them.
    """

    def check_results_dict_not_empty(self, results):
        """Assert every measured (batch_size, sequence_length) cell holds a value."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            use_xla=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 88 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the config classes below reference it by this name.
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R style transformer with a projection head).

    The obfuscated original declared every ``__init__`` parameter as ``__A`` (a SyntaxError)
    while the body read the real parameter names; this restores the canonical signature.
    """

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (a CLIP-style ViT)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a composite AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding an AltCLIP text config and vision config."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: "AltCLIPTextConfig", vision_config: "AltCLIPVisionConfig", **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 53 |
import string


# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message):
    """Return a dict mapping each uppercase letter A-Z to its count in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    """Sort-key helper: return the first element of a (key, value) pair."""
    return x[0]


def get_frequency_order(message):
    """Return the 26 letters ordered from most to least frequent in `message`.

    Letters with equal counts are grouped and ordered by reverse ETAOIN rank,
    matching the classic frequency-analysis algorithm.

    >>> get_frequency_order("Hello World")
    'LOWDRHEZQXJKVBPYGFMUCSNIAT'
    """
    letter_to_freq = get_letter_count(message)
    # Invert the mapping: frequency -> list of letters with that frequency.
    freq_to_letter = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    # Within each frequency bucket, break ties by reverse ETAOIN order.
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message):
    """Score 0-12: how many of the six most/least frequent letters of `message`
    coincide with English's six most/least frequent letters.

    >>> english_freq_match_score("Hello World")
    1
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 88 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : int = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 54 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs mirrored on the HF GCP bucket; consumed by
# list_datasets_on_hf_gcp_parameters() below.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build `parameterized.named_parameters` cases from DATASETS_ON_HF_GCP.

    With `with_config=True` one case per (dataset, config) pair is produced;
    otherwise one case per unique dataset name.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    """For each mirrored (dataset, config) pair, verify its dataset-info file exists on HF GCP."""

    # Filled in per-case by the parameterized decorator.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            # The file must be downloadable from the mirror.
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Prepare a Wikipedia config from the HF GCS mirror and load it as a Dataset."""
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Stream a Wikipedia config and check the expected IterableDataset structure."""
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 88 | 0 |
'''simple docstring'''
a_ : str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a_ : Any = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a_ : int = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 55 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    """CPU-only check that an optimizer wrapped by Accelerator survives pickling."""

    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # A wrapped optimizer must round-trip through pickle.
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global state so other tests start clean.
        AcceleratorState._reset_state()
| 88 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio payload to mono float32 samples at `sampling_rate` using ffmpeg.

    Raises ValueError if ffmpeg is missing or the payload yields no audio.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw audio chunks captured from the default microphone via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    # Pick the platform-specific ffmpeg capture backend.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s=None,
    stride_length_s=None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks with stride metadata.

    Chunks of `chunk_length_s` seconds are emitted with left/right strides; when
    `stream_chunk_s` is given, partial chunks are also yielded at that cadence.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride, stream: bool = False):
    """Re-chunk a byte iterator into overlapping chunks of `chunk_len` bytes.

    `stride` is a (left, right) pair of overlap sizes; it must sum to less than
    `chunk_len`. With `stream=True`, partial (not yet full) chunks are also
    yielded, flagged with `"partial": True`.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next chunk.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run `ffmpeg_command` and yield its stdout in reads of `buflen` bytes."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 56 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase : Optional[int] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=1 ) -> str:
"""simple docstring"""
__magic_name__ = tokenizer
__magic_name__ = dataset
__magic_name__ = len(UpperCamelCase__ ) if n_tasks is None else n_tasks
__magic_name__ = n_copies
def __iter__( self : List[Any] ) -> List[str]:
"""simple docstring"""
__magic_name__ = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
__magic_name__ = self.tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""pt""" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
__magic_name__ = start_length
__magic_name__ = eof_strings
__magic_name__ = tokenizer
def __call__( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__magic_name__ = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCamelCase__ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = re.split("""(%s)""" % """|""".join(A_ ), A_ )
# last string should be ""
return "".join(string_list[:-2] )
def a__ ( A_, A_, A_, A_, A_, A_=20, **A_ ):
'''simple docstring'''
__magic_name__ = defaultdict(A_ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(A_ ) ):
with torch.no_grad():
__magic_name__ = batch["""ids"""].shape[-1]
__magic_name__ = accelerator.unwrap_model(A_ ).generate(
input_ids=batch["""ids"""][:, : batch["""input_len"""]], num_return_sequences=A_, **A_ )
# each task is generated batch_size times
__magic_name__ = batch["""task_id"""].repeat(A_ )
__magic_name__ = accelerator.pad_across_processes(
A_, dim=1, pad_index=tokenizer.pad_token_id )
__magic_name__ , __magic_name__ = accelerator.gather((generated_tokens, generated_tasks) )
__magic_name__ = generated_tokens.cpu().numpy()
__magic_name__ = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(A_, A_ ):
gen_token_dict[task].append(A_ )
__magic_name__ = [[] for _ in range(A_ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__magic_name__ = tokenizer.decode(A_, skip_special_tokens=A_, clean_up_tokenization_spaces=A_ )
code_gens[task].append(remove_last_block(A_ ) )
return code_gens
def a__ ( ):
'''simple docstring'''
__magic_name__ = HfArgumentParser(A_ )
__magic_name__ = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__magic_name__ = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__magic_name__ = """false"""
if args.num_workers is None:
__magic_name__ = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__magic_name__ = Accelerator()
set_seed(args.seed, device_specific=A_ )
# Load model and tokenizer
__magic_name__ = AutoTokenizer.from_pretrained(args.model_ckpt )
__magic_name__ = tokenizer.eos_token
__magic_name__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__magic_name__ = {
"""do_sample""": args.do_sample,
"""temperature""": args.temperature,
"""max_new_tokens""": args.max_new_tokens,
"""top_p""": args.top_p,
"""top_k""": args.top_k,
"""stopping_criteria""": StoppingCriteriaList([EndOfFunctionCriteria(0, A_, A_ )] ),
}
# Load evaluation dataset and metric
__magic_name__ = load_dataset("""openai_humaneval""" )
__magic_name__ = load_metric("""code_eval""" )
__magic_name__ = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] )
__magic_name__ = args.n_samples // args.batch_size
__magic_name__ = TokenizedDataset(A_, human_eval["""test"""], n_copies=A_, n_tasks=A_ )
# do not confuse args.batch_size, which is actually the num_return_sequences
__magic_name__ = DataLoader(A_, batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__magic_name__ = code_eval_metric.compute(references=[""""""], predictions=[[""""""]] )
except ValueError as exception:
print(
"""Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"""
""" flag to enable code evaluation.""" )
raise exception
__magic_name__ , __magic_name__ = accelerator.prepare(A_, A_ )
__magic_name__ = complete_code(
A_, A_, A_, A_, n_tasks=A_, batch_size=args.batch_size, **A_, )
if accelerator.is_main_process:
__magic_name__ = []
for task in tqdm(range(A_ ) ):
__magic_name__ = human_eval["""test"""][task]["""test"""]
__magic_name__ = f'''check({human_eval['test'][task]['entry_point']})'''
references.append("""\n""" + test_func + """\n""" + entry_point )
# Evaluate completions with "code_eval" metric
__magic_name__ , __magic_name__ = code_eval_metric.compute(
references=A_, predictions=A_, num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file, """w""" ) as fp:
json.dump(A_, A_ )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 88 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    """Split a sklearn Bunch-style mapping into (features, targets)."""
    return (data["data"], data["target"])
def xgboost(features, target):
    """Fit an XGBClassifier on the given features/labels and return it."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main():
    """Train an XGBoost classifier on the Iris dataset and plot its confusion matrix."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 57 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own options plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    """Spawn the training script's `_mp_fn` on the requested number of TPU cores."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 88 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Example snippet injected into the pipeline docstring via @replace_example_docstring.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def lowerCamelCase(height: int, width: int, scale_factor: int = 8) -> tuple:
    """Map a pixel-space (height, width) to the latent-space size used by the unet.

    Each dimension is divided by ``scale_factor**2`` — rounding *up* so partial
    blocks are kept — and then multiplied back by ``scale_factor``.

    Args:
        height: Requested image height in pixels.
        width: Requested image width in pixels.
        scale_factor: Downsampling factor of the image autoencoder (default 8).

    Returns:
        ``(new_height, new_width)`` in latent units.

    Note:
        The original body incremented and returned ``new_height``/``new_width``
        without ever assigning them (NameError), and declared every parameter
        with the same name (SyntaxError); both are fixed here.
    """
    block = scale_factor**2
    new_height = height // block
    if height % block != 0:
        # Round up: keep the partial block.
        new_height += 1
    new_width = width // block
    if width % block != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class a_(snake_case_):
    """Text-to-image decoder pipeline for Kandinsky 2.2.

    Turns CLIP image embeddings (produced by the prior pipeline) into images:
    the unet denoises latents conditioned on the embeddings, and the MoVQ
    autoencoder decodes the final latents into pixels.

    Args:
        unet: Conditional U-Net that denoises the image latents.
        scheduler: ``DDPMScheduler`` driving the denoising loop.
        movq: MoVQ (VQ) autoencoder used to decode latents into images.
    """

    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        # Spatial downsampling factor of the MoVQ autoencoder (pixels per latent).
        # The original bound this to a throwaway local, so ``__call__`` crashed.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw initial noise latents (or validate user-provided ones) and scale them."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        # Schedulers expect the initial sample scaled by their starting sigma.
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU; each is moved to GPU only while it runs.

        Lowest-memory option, slower than :meth:`enable_model_cpu_offload`.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")

        device = torch.device(f'cuda:{gpu_id}')

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving each to GPU just before its forward pass."""
        if is_accelerate_available() and is_accelerate_version(""">=""", """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")

        device = torch.device(f'cuda:{gpu_id}')

        if self.device.type != "cpu":
            self.to("""cpu""", silence_dtype_warnings=False)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            # Each hook chains to the previous so models are offloaded in order.
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually; keep its hook for that.
        # (The original dropped the hook into a throwaway local.)
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet, """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, """_hf_hook""")
                and hasattr(module._hf_hook, """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(lowercase_)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate images from (negative) image embeddings.

        Returns an :class:`ImagePipelineOutput` (or a plain tuple when
        ``return_dict`` is False) holding the decoded images in the requested
        ``output_type`` (``"pil"``, ``"np"`` or ``"pt"``).
        """
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            # Unconditional first, conditional second — the chunk(2) below relies on this order.
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        # The downscale helper in this file is named ``lowerCamelCase``; the
        # original called the undefined name ``downscale_height_and_width``.
        height, width = lowerCamelCase(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for t in self.progress_bar(timesteps):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The unet predicts variance alongside noise; split the channels apart.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the predicted variance; drop it.
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing: decode the final latents with the MoVQ autoencoder
        image = self.movq.decode(latents, force_not_quantize=True)["""sample"""]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5  # map [-1, 1] -> [0, 1]
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 58 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase_(_A):
    """Configuration for a PEGASUS encoder–decoder model.

    Holds the architecture hyper-parameters; defaults correspond to the
    ``google/pegasus-large`` checkpoint. The original declared every
    constructor parameter with the same name (SyntaxError) and assigned every
    value to a throwaway local instead of ``self`` — both fixed here.
    """

    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="""gelu""",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # The original assigned ``encoder_layers`` twice; the second binding is
        # the conventional ``num_hidden_layers`` alias.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        """Alias for ``encoder_attention_heads`` (matches ``attribute_map``)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for ``d_model`` (matches ``attribute_map``)."""
        return self.d_model
| 88 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
__lowerCamelCase = {"""mobilebert-uncased""": 5_12}
__lowerCamelCase = {}
class UpperCAmelCase(A_):
    """Fast MobileBERT tokenizer (backed by the Rust *tokenizers* library), WordPiece-based.

    The original declared duplicate parameter names (SyntaxError), bound the
    normalizer state to throwaway locals and never re-installed the rebuilt
    normalizer, and used the first sequence twice when building pair
    token-type ids — all fixed here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested ones and install it on the backend.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Add special tokens: ``[CLS] A [SEP]`` (pair: ``[CLS] A [SEP] B [SEP]``)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """Return token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        # The second segment is the *pair* sequence; the original reused the first.
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the backend tokenizer model; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 59 |
import re
import string
import numpy as np
import datasets
__lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> 
exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowerCAmelCase : Optional[int] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Exact-match metric: percentage of predictions that equal their reference.

    NOTE(review): the module-level doc constants in this file are all bound to
    ``__lowerCAmelCase``, so ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` /
    ``_CITATION`` referenced here are never defined — confirm and restore the
    constant names upstream.
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Compute the exact-match rate.

        Args:
            predictions: List of predicted strings.
            references: List of reference strings (same length).
            regexes_to_ignore: Regex patterns removed from both sides first.
            ignore_case: Lowercase both sides before comparing.
            ignore_punctuation: Strip all punctuation before comparing.
            ignore_numbers: Strip all digits before comparing.

        Returns:
            ``{"exact_match": rate}`` with ``rate`` in [0.0, 100.0].
        """
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            # (The original bound these arrays to throwaway locals, so the
            # substitutions were silently discarded.)
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 88 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.