code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__lowercase : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
__lowercase : str = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class _A ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,"""schedulers/""" ) )
snake_case : str = self.diffusers_dir
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE_ ,"""src/diffusers/schedulers/scheduling_ddpm.py""" ) ,os.path.join(self.diffusers_dir ,"""schedulers/scheduling_ddpm.py""" ) ,)
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
snake_case : str = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
snake_case : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
snake_case : str = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 )
snake_case : int = black.format_str(SCREAMING_SNAKE_CASE_ ,mode=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = os.path.join(self.diffusers_dir ,"""new_code.py""" )
with open(SCREAMING_SNAKE_CASE_ ,"""w""" ,newline="""\n""" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE_ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ ,"""r""" ) as f:
self.assertTrue(f.read() ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,REFERENCE_CODE + """\n""" ,)
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ,"""DDPMSchedulerOutput""" ,SCREAMING_SNAKE_CASE_ ,)
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,re.sub("""DDPM""" ,"""Test""" ,SCREAMING_SNAKE_CASE_ ) ,)
# Copy consistency with a really long name
snake_case : Optional[int] = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" ,F"""{long_class_name}SchedulerOutput""" ,re.sub("""Bert""" ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" ,"""TestSchedulerOutput""" ,SCREAMING_SNAKE_CASE_ ,overwrite_result=re.sub("""DDPM""" ,"""Test""" ,SCREAMING_SNAKE_CASE_ ) ,)
| 36 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowercase : Dict = logging.get_logger(__name__)
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = ['''pixel_values''']
def __init__( self ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = 1 / 255 ,SCREAMING_SNAKE_CASE_ = True ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = True ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = size if size is not None else {"""shortest_edge""": 224}
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : Optional[Any] = do_resize
snake_case : Union[str, Any] = size
snake_case : Dict = resample
snake_case : Dict = do_rescale
snake_case : Dict = rescale_factor
snake_case : List[str] = do_center_crop
snake_case : Dict = crop_size
snake_case : Any = do_flip_channel_order
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = PIL.Image.BILINEAR ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : str = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
snake_case : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ ,size=size["""shortest_edge"""] ,default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE_ ,size=(size["""height"""], size["""width"""]) ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
return flip_channel_order(SCREAMING_SNAKE_CASE_ ,data_format=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST ,**SCREAMING_SNAKE_CASE_ ,):
'''simple docstring'''
snake_case : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : List[str] = resample if resample is not None else self.resample
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
snake_case : Tuple = size if size is not None else self.size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,default_to_square=SCREAMING_SNAKE_CASE_ )
snake_case : str = crop_size if crop_size is not None else self.crop_size
snake_case : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ ,param_name="""crop_size""" )
snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
snake_case : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
snake_case : Union[str, Any] = [self.resize(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ,resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
snake_case : Optional[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ ,size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(image=SCREAMING_SNAKE_CASE_ ,scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
snake_case : Optional[int] = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : List[Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) for image in images]
snake_case : int = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ ,tensor_type=SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
snake_case : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
snake_case : int = target_sizes.numpy()
snake_case : Optional[Any] = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
snake_case : Optional[int] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode="""bilinear""" ,align_corners=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
snake_case : Tuple = logits.argmax(dim=1 )
snake_case : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 36 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _snake_case ( a__ , a__ ):
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase : int = 128 , UpperCAmelCase : int = 256 , UpperCAmelCase : float = 2_0_0_0.0 , UpperCAmelCase : int = 768 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 64 , UpperCAmelCase : int = 2048 , UpperCAmelCase : float = 0.1 , ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Sequential(
nn.Linear(UpperCAmelCase , d_model * 4 , bias=UpperCAmelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCAmelCase ) , nn.SiLU() , )
__lowerCamelCase : str = nn.Embedding(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = nn.Dropout(p=UpperCAmelCase )
__lowerCamelCase : str = nn.ModuleList()
for lyr_num in range(UpperCAmelCase ):
# FiLM conditional T5 decoder
__lowerCamelCase : List[str] = DecoderLayer(d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase )
self.decoders.append(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = TaLayerNorm(UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Dropout(p=UpperCAmelCase )
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : int ):
__lowerCamelCase : List[Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : str ):
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
__lowerCamelCase : str = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
__lowerCamelCase : Optional[int] = self.conditioning_emb(UpperCAmelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
__lowerCamelCase : Tuple = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
__lowerCamelCase : int = torch.broadcast_to(
torch.arange(UpperCAmelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
__lowerCamelCase : Optional[Any] = self.position_encoding(UpperCAmelCase )
__lowerCamelCase : List[str] = self.continuous_inputs_projection(UpperCAmelCase )
inputs += position_encodings
__lowerCamelCase : List[Any] = self.dropout(UpperCAmelCase )
# decoder: No padding present.
__lowerCamelCase : Tuple = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
__lowerCamelCase : Optional[Any] = [(x, self.encoder_decoder_mask(UpperCAmelCase , UpperCAmelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
__lowerCamelCase : Union[str, Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
__lowerCamelCase : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
__lowerCamelCase : List[Any] = lyr(
UpperCAmelCase , conditioning_emb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , encoder_attention_mask=UpperCAmelCase , )[0]
__lowerCamelCase : Dict = self.decoder_norm(UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.post_dropout(UpperCAmelCase )
__lowerCamelCase : str = self.spec_out(UpperCAmelCase )
return spec_out
class _snake_case ( nn.Module ):
def __init__( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[str]=1E-6 ):
super().__init__()
__lowerCamelCase : Union[str, Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , dropout_rate=UpperCAmelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCAmelCase , d_kv=UpperCAmelCase , num_heads=UpperCAmelCase , dropout_rate=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase , layer_norm_epsilon=UpperCAmelCase ) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Dict=None , ):
__lowerCamelCase : Union[str, Any] = self.layer[0](
UpperCAmelCase , conditioning_emb=UpperCAmelCase , attention_mask=UpperCAmelCase , )
if encoder_hidden_states is not None:
__lowerCamelCase : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
__lowerCamelCase : Dict = self.layer[1](
UpperCAmelCase , key_value_states=UpperCAmelCase , attention_mask=UpperCAmelCase , )
# Apply Film Conditional Feed Forward layer
__lowerCamelCase : List[Any] = self.layer[-1](UpperCAmelCase , UpperCAmelCase )
return (hidden_states,)
class _snake_case ( nn.Module ):
def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict ):
super().__init__()
__lowerCamelCase : Union[str, Any] = TaLayerNorm(UpperCAmelCase )
__lowerCamelCase : Any = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = Attention(query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , out_bias=UpperCAmelCase , scale_qk=UpperCAmelCase )
__lowerCamelCase : Tuple = nn.Dropout(UpperCAmelCase )
def lowerCamelCase__ ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=None , ):
# pre_self_attention_layer_norm
__lowerCamelCase : int = self.layer_norm(UpperCAmelCase )
if conditioning_emb is not None:
__lowerCamelCase : Optional[Any] = self.FiLMLayer(UpperCAmelCase , UpperCAmelCase )
# Self-attention block
__lowerCamelCase : Optional[Any] = self.attention(UpperCAmelCase )
__lowerCamelCase : Dict = hidden_states + self.dropout(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
super().__init__()
__lowerCamelCase : str = Attention(query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , out_bias=UpperCAmelCase , scale_qk=UpperCAmelCase )
__lowerCamelCase : List[Any] = TaLayerNorm(UpperCAmelCase , eps=UpperCAmelCase )
__lowerCamelCase : str = nn.Dropout(UpperCAmelCase )
def lowerCamelCase__ ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[int]=None , ):
__lowerCamelCase : str = self.layer_norm(UpperCAmelCase )
__lowerCamelCase : Dict = self.attention(
UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , attention_mask=attention_mask.squeeze(1 ) , )
__lowerCamelCase : List[str] = hidden_states + self.dropout(UpperCAmelCase )
return layer_output
class _snake_case ( nn.Module ):
def __init__( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ):
super().__init__()
__lowerCamelCase : str = TaDenseGatedActDense(d_model=UpperCAmelCase , d_ff=UpperCAmelCase , dropout_rate=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCAmelCase )
__lowerCamelCase : Dict = TaLayerNorm(UpperCAmelCase , eps=UpperCAmelCase )
__lowerCamelCase : Optional[Any] = nn.Dropout(UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=None ):
__lowerCamelCase : int = self.layer_norm(UpperCAmelCase )
if conditioning_emb is not None:
__lowerCamelCase : Union[str, Any] = self.film(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : List[str] = self.DenseReluDense(UpperCAmelCase )
__lowerCamelCase : Optional[int] = hidden_states + self.dropout(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : List[Any] = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
__lowerCamelCase : str = nn.Dropout(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = NewGELUActivation()
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[Any] ):
__lowerCamelCase : Union[str, Any] = self.act(self.wi_a(UpperCAmelCase ) )
__lowerCamelCase : Any = self.wi_a(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = hidden_gelu * hidden_linear
__lowerCamelCase : Any = self.dropout(UpperCAmelCase )
__lowerCamelCase : List[Any] = self.wo(UpperCAmelCase )
return hidden_states
class _snake_case ( nn.Module ):
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[str]=1E-6 ):
super().__init__()
__lowerCamelCase : List[Any] = nn.Parameter(torch.ones(UpperCAmelCase ) )
__lowerCamelCase : Tuple = eps
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Any ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
__lowerCamelCase : int = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCAmelCase )
__lowerCamelCase : Dict = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
__lowerCamelCase : Union[str, Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _snake_case ( nn.Module ):
def lowerCamelCase__ ( self : str , UpperCAmelCase : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(UpperCAmelCase , 3.0 )) ))
class _snake_case ( nn.Module ):
def __init__( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ):
super().__init__()
__lowerCamelCase : Any = nn.Linear(UpperCAmelCase , out_features * 2 , bias=UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ):
__lowerCamelCase : Optional[Any] = self.scale_bias(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : Dict = torch.chunk(UpperCAmelCase , 2 , -1 )
__lowerCamelCase : List[Any] = x * (1 + scale) + shift
return x | 366 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=18 , UpperCAmelCase : str=30 , UpperCAmelCase : Any=400 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Dict=None , UpperCAmelCase : List[str]=True , UpperCAmelCase : int=[0.5, 0.5, 0.5] , UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , ):
__lowerCamelCase : Any = size if size is not None else {"shortest_edge": 18}
__lowerCamelCase : Dict = crop_size if crop_size is not None else {"height": 18, "width": 18}
__lowerCamelCase : List[str] = parent
__lowerCamelCase : int = batch_size
__lowerCamelCase : Any = num_channels
__lowerCamelCase : Tuple = image_size
__lowerCamelCase : int = min_resolution
__lowerCamelCase : List[Any] = max_resolution
__lowerCamelCase : List[str] = do_resize
__lowerCamelCase : str = size
__lowerCamelCase : Tuple = do_center_crop
__lowerCamelCase : Optional[int] = crop_size
__lowerCamelCase : Optional[Any] = do_normalize
__lowerCamelCase : Optional[Any] = image_mean
__lowerCamelCase : List[Any] = image_std
def lowerCamelCase__ ( self : int ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Optional[int] = LevitImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
__lowerCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Any ):
# Initialize image_processing
__lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCamelCase : int = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase__ ( self : Any ):
# Initialize image_processing
__lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCamelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCamelCase : Dict = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCamelCase__ ( self : Union[str, Any] ):
# Initialize image_processing
__lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCamelCase : List[str] = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 366 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase__ )
_UpperCAmelCase =-1
_UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
_UpperCAmelCase =model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
_UpperCAmelCase =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase =TextStreamer(UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase =cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase__ )
_UpperCAmelCase =-1
_UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
_UpperCAmelCase =model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
_UpperCAmelCase =tokenizer.decode(greedy_ids[0] )
_UpperCAmelCase =TextIteratorStreamer(UpperCAmelCase__ )
_UpperCAmelCase ={"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCAmelCase =Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
_UpperCAmelCase =""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase__ )
_UpperCAmelCase =-1
_UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
_UpperCAmelCase =model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ )
_UpperCAmelCase =greedy_ids[:, input_ids.shape[1] :]
_UpperCAmelCase =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
_UpperCAmelCase =TextStreamer(UpperCAmelCase__ , skip_prompt=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=10 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCAmelCase =cs.out[:-1]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE ( self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
_UpperCAmelCase =AutoTokenizer.from_pretrained("distilgpt2" )
_UpperCAmelCase =AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(UpperCAmelCase__ )
_UpperCAmelCase =-1
_UpperCAmelCase =torch.ones((1, 5) , device=UpperCAmelCase__ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCAmelCase =TextStreamer(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
model.generate(UpperCAmelCase__ , max_new_tokens=1 , do_sample=UpperCAmelCase__ , streamer=UpperCAmelCase__ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCAmelCase =cs.out[:-1] # Remove the final "\n"
_UpperCAmelCase =tokenizer(UpperCAmelCase__ , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
_UpperCAmelCase =AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase__ )
_UpperCAmelCase =-1
_UpperCAmelCase =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase__ )
_UpperCAmelCase =TextIteratorStreamer(UpperCAmelCase__ , timeout=0.001 )
_UpperCAmelCase ={"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCAmelCase =Thread(target=model.generate , kwargs=UpperCAmelCase__ )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase__ ):
_UpperCAmelCase =""
for new_text in streamer:
streamer_text += new_text
| 408 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase =numpy.array([0, 0])
__lowerCAmelCase =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase =numpy.array([1, 0])
__lowerCAmelCase =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = initial_vectors
for _ in range(_UpperCAmelCase ):
a_ = iteration_step(_UpperCAmelCase )
return vectors
def a ( _UpperCAmelCase ) -> list[numpy.ndarray]:
"""simple docstring"""
a_ = []
for i, start_vector in enumerate(vectors[:-1] ):
a_ = vectors[i + 1]
new_vectors.append(_UpperCAmelCase )
a_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a ( _UpperCAmelCase , _UpperCAmelCase ) -> numpy.ndarray:
"""simple docstring"""
a_ = numpy.radians(_UpperCAmelCase )
a_ , a_ = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase )
a_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(_UpperCAmelCase , _UpperCAmelCase )
def a ( _UpperCAmelCase ) -> None:
"""simple docstring"""
a_ = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
a_ , a_ = zip(*_UpperCAmelCase )
plt.plot(_UpperCAmelCase , _UpperCAmelCase )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 697 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase__ ( a : Optional[Any] , a : List[Any]=None ) -> Dict:
"""simple docstring"""
a__ :Tuple = None
if token is not None:
a__ :Tuple = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
a__ :List[str] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
a__ :int = requests.get(a , headers=a ).json()
a__ :Union[str, Any] = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
a__ :Union[str, Any] = math.ceil((result["total_count"] - 100) / 100 )
for i in range(a ):
a__ :Tuple = requests.get(url + F'''&page={i + 2}''' , headers=a ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase__ ( a : int , a : List[str]=None ) -> List[str]:
"""simple docstring"""
a__ :List[Any] = None
if token is not None:
a__ :List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
a__ :Union[str, Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
a__ :List[str] = requests.get(a , headers=a ).json()
a__ :Tuple = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
a__ :Union[str, Any] = math.ceil((result["total_count"] - 100) / 100 )
for i in range(a ):
a__ :Optional[int] = requests.get(url + F'''&page={i + 2}''' , headers=a ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase__ ( a : Optional[Any] , a : str , a : Dict , a : List[Any] ) -> Dict:
"""simple docstring"""
a__ :int = None
if token is not None:
a__ :List[str] = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
a__ :Optional[Any] = requests.get(a , headers=a , allow_redirects=a )
a__ :int = result.headers["Location"]
a__ :List[Any] = requests.get(a , allow_redirects=a )
a__ :int = os.path.join(a , F'''{artifact_name}.zip''' )
with open(a , "wb" ) as fp:
fp.write(response.content )
def lowerCamelCase__ ( a : Any , a : Optional[Any]=None ) -> Optional[int]:
"""simple docstring"""
a__ :Dict = []
a__ :Optional[Any] = []
a__ :List[Any] = None
with zipfile.ZipFile(a ) as z:
for filename in z.namelist():
if not os.path.isdir(a ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(a ) as f:
for line in f:
a__ :List[Any] = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
a__ :List[str] = line[: line.index(": " )]
a__ :Optional[int] = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
a__ :Any = line[len("FAILED " ) :]
failed_tests.append(a )
elif filename == "job_name.txt":
a__ :Optional[Any] = line
if len(a ) != len(a ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(a )} for `errors` '''
F'''and {len(a )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
a__ :List[Any] = None
if job_name and job_links:
a__ :List[Any] = job_links.get(a , a )
# A list with elements of the form (line of error, error, failed test)
a__ :Tuple = [x + [y] + [job_link] for x, y in zip(a , a )]
return result
def lowerCamelCase__ ( a : Optional[Any] , a : int=None ) -> List[str]:
"""simple docstring"""
a__ :Optional[Any] = []
a__ :List[str] = [os.path.join(a , a ) for p in os.listdir(a ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(a , job_links=a ) )
return errors
def lowerCamelCase__ ( a : int , a : Optional[int]=None ) -> Any:
"""simple docstring"""
a__ :int = Counter()
counter.update([x[1] for x in logs] )
a__ :Union[str, Any] = counter.most_common()
a__ :Optional[Any] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
a__ :Dict = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
a__ :Any = dict(sorted(r.items() , key=lambda a : item[1]["count"] , reverse=a ) )
return r
def lowerCamelCase__ ( a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
a__ :Tuple = test.split("::" )[0]
if test.startswith("tests/models/" ):
a__ :Optional[int] = test.split("/" )[2]
else:
a__ :str = None
return test
def lowerCamelCase__ ( a : Tuple , a : List[Any]=None ) -> str:
"""simple docstring"""
a__ :Optional[int] = [(x[0], x[1], get_model(x[2] )) for x in logs]
a__ :Union[str, Any] = [x for x in logs if x[2] is not None]
a__ :List[str] = {x[2] for x in logs}
a__ :Tuple = {}
for test in tests:
a__ :Dict = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
a__ :Optional[int] = counter.most_common()
a__ :Tuple = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
a__ :Any = sum(error_counts.values() )
if n_errors > 0:
a__ :Optional[Any] = {"count": n_errors, "errors": error_counts}
a__ :str = dict(sorted(r.items() , key=lambda a : item[1]["count"] , reverse=a ) )
return r
def lowerCamelCase__ ( a : Any ) -> Any:
"""simple docstring"""
a__ :List[Any] = "| no. | error | status |"
a__ :Optional[int] = "|-:|:-|:-|"
a__ :List[str] = [header, sep]
for error in reduced_by_error:
a__ :List[Any] = reduced_by_error[error]["count"]
a__ :str = F'''| {count} | {error[:100]} | |'''
lines.append(a )
return "\n".join(a )
def lowerCamelCase__ ( a : Dict ) -> Optional[Any]:
"""simple docstring"""
a__ :Any = "| model | no. of errors | major error | count |"
a__ :List[str] = "|-:|-:|-:|-:|"
a__ :Optional[int] = [header, sep]
for model in reduced_by_model:
a__ :Dict = reduced_by_model[model]["count"]
a__ , a__ :List[Any] = list(reduced_by_model[model]["errors"].items() )[0]
a__ :Tuple = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(a )
return "\n".join(a )
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
snake_case__ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
snake_case__ = get_job_links(args.workflow_run_id, token=args.token)
snake_case__ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
snake_case__ = k.find(''' / ''')
snake_case__ = k[index + len(''' / ''') :]
snake_case__ = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
snake_case__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
snake_case__ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
snake_case__ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
snake_case__ = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
snake_case__ = reduce_by_error(errors)
snake_case__ = reduce_by_model(errors)
snake_case__ = make_github_table(reduced_by_error)
snake_case__ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 373 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ = logging.get_logger(__name__)
snake_case__ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase_ ( _a):
lowerCamelCase_ = 'pegasus'
lowerCamelCase_ = ['past_key_values']
lowerCamelCase_ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Any , __A : Dict=50265 , __A : List[Any]=1024 , __A : int=12 , __A : Optional[Any]=4096 , __A : Optional[int]=16 , __A : Dict=12 , __A : List[Any]=4096 , __A : List[str]=16 , __A : Optional[int]=0.0 , __A : List[Any]=0.0 , __A : List[str]=True , __A : Optional[int]=True , __A : str="gelu" , __A : Tuple=1024 , __A : Any=0.1 , __A : List[Any]=0.0 , __A : List[str]=0.0 , __A : Tuple=0.02 , __A : Union[str, Any]=0 , __A : Union[str, Any]=False , __A : Optional[Any]=0 , __A : Tuple=1 , __A : str=1 , **__A : Any , ) ->Union[str, Any]:
"""simple docstring"""
a__ :Any = vocab_size
a__ :List[str] = max_position_embeddings
a__ :int = d_model
a__ :Union[str, Any] = encoder_ffn_dim
a__ :List[Any] = encoder_layers
a__ :Union[str, Any] = encoder_attention_heads
a__ :Tuple = decoder_ffn_dim
a__ :List[Any] = decoder_layers
a__ :Tuple = decoder_attention_heads
a__ :Optional[int] = dropout
a__ :str = attention_dropout
a__ :Optional[int] = activation_dropout
a__ :str = activation_function
a__ :Dict = init_std
a__ :Any = encoder_layerdrop
a__ :int = decoder_layerdrop
a__ :Union[str, Any] = use_cache
a__ :List[Any] = encoder_layers
a__ :Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
@property
def _snake_case ( self : Optional[Any] ) ->int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def _snake_case ( self : Union[str, Any] ) ->int:
"""simple docstring"""
return self.d_model
| 373 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
SCREAMING_SNAKE_CASE_ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def __snake_case ( ):
"""simple docstring"""
UpperCamelCase = (
list(range(ord('''!''' ) ,ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) ,ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) ,ord('''ÿ''' ) + 1 ) )
)
UpperCamelCase = bs[:]
UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_lowercase )
cs.append(2**8 + n )
n += 1
UpperCamelCase = [chr(_lowercase ) for n in cs]
return dict(zip(_lowercase ,_lowercase ) )
def __snake_case ( _lowercase ):
"""simple docstring"""
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
return pairs
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_="replace" , lowerCamelCase_="<s>" , lowerCamelCase_="</s>" , lowerCamelCase_="</s>" , lowerCamelCase_="<s>" , lowerCamelCase_="<unk>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_=False , **lowerCamelCase_ , ) -> Dict:
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else bos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else eos_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else sep_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else cls_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else unk_token
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token
super().__init__(
errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
with open(lowerCamelCase_ , encoding='''utf-8''') as vocab_handle:
UpperCamelCase = json.load(lowerCamelCase_)
UpperCamelCase = {v: k for k, v in self.encoder.items()}
UpperCamelCase = errors # how to handle errors in decoding
UpperCamelCase = bytes_to_unicode()
UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ , encoding='''utf-8''') as merges_handle:
UpperCamelCase = merges_handle.read().split('''\n''')[1:-1]
UpperCamelCase = [tuple(merge.split()) for merge in bpe_merges]
UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_))))
UpperCamelCase = {}
UpperCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
@property
def UpperCAmelCase__ ( self) -> str:
return len(self.encoder)
def UpperCAmelCase__ ( self) -> Optional[Any]:
return dict(self.encoder , **self.added_tokens_encoder)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> str:
if token in self.cache:
return self.cache[token]
UpperCamelCase = tuple(lowerCamelCase_)
UpperCamelCase = get_pairs(lowerCamelCase_)
if not pairs:
return token
while True:
UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_: self.bpe_ranks.get(lowerCamelCase_ , float('''inf''')))
if bigram not in self.bpe_ranks:
break
UpperCamelCase , UpperCamelCase = bigram
UpperCamelCase = []
UpperCamelCase = 0
while i < len(lowerCamelCase_):
try:
UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCamelCase = j
if word[i] == first and i < len(lowerCamelCase_) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCamelCase = tuple(lowerCamelCase_)
UpperCamelCase = new_word
if len(lowerCamelCase_) == 1:
break
else:
UpperCamelCase = get_pairs(lowerCamelCase_)
UpperCamelCase = ''' '''.join(lowerCamelCase_)
UpperCamelCase = word
return word
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Dict:
UpperCamelCase = []
for token in re.findall(self.pat , lowerCamelCase_):
UpperCamelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_).split(''' '''))
return bpe_tokens
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Union[str, Any]:
return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token))
def UpperCAmelCase__ ( self , lowerCamelCase_) -> List[Any]:
return self.decoder.get(lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_) -> Any:
UpperCamelCase = ''''''.join(lowerCamelCase_)
UpperCamelCase = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
return text
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
UpperCamelCase = os.path.join(
lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_) + '''\n''')
UpperCamelCase = 0
with open(lowerCamelCase_ , '''w''' , encoding='''utf-8''') as writer:
writer.write('''#version: 0.2\n''')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_: kv[1]):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''')
UpperCamelCase = token_index
writer.write(''' '''.join(lowerCamelCase_) + '''\n''')
index += 1
return vocab_file, merge_file
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_)
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_)) + [1]
return [1] + ([0] * len(lowerCamelCase_)) + [1, 1] + ([0] * len(lowerCamelCase_)) + [1]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_=False , **lowerCamelCase_) -> Dict:
UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_) > 0 and not text[0].isspace()):
UpperCamelCase = ''' ''' + text
return (text, kwargs) | 34 | from __future__ import annotations
def UpperCamelCase ( __lowercase : list[int] ):
'''simple docstring'''
A_ : int = len(__lowercase ) // 2
# choose the middle 3 elements
A_ : List[Any] = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 558 | 0 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> float:
'''simple docstring'''
_UpperCamelCase : str = x
_UpperCamelCase : Any = y
for step in range(UpperCAmelCase ): # noqa: B007
_UpperCamelCase : Any = a * a - b * b + x
_UpperCamelCase : Any = 2 * a * b + y
_UpperCamelCase : Optional[int] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def __A ( UpperCAmelCase ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def __A ( UpperCAmelCase ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(UpperCAmelCase ,1 ,1 ) )
def __A ( UpperCAmelCase = 8_0_0 ,UpperCAmelCase = 6_0_0 ,UpperCAmelCase = -0.6 ,UpperCAmelCase = 0 ,UpperCAmelCase = 3.2 ,UpperCAmelCase = 5_0 ,UpperCAmelCase = True ,) -> Image.Image:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = Image.new("RGB" ,(image_width, image_height) )
_UpperCamelCase : str = img.load()
# loop through the image-coordinates
for image_x in range(UpperCAmelCase ):
for image_y in range(UpperCAmelCase ):
# determine the figure-coordinates based on the image-coordinates
_UpperCamelCase : Dict = figure_width / image_width * image_height
_UpperCamelCase : str = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCamelCase : Any = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCamelCase : Any = get_distance(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCamelCase : str = get_color_coded_rgb(UpperCAmelCase )
else:
_UpperCamelCase : Optional[Any] = get_black_and_white_rgb(UpperCAmelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCAmelCase_ : str = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 204 | '''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__ = 42
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowercase__ : List[str]=3 , lowercase__ : Tuple=3 , lowercase__ : List[str]=("DownEncoderBlock2D",) , lowercase__ : Optional[int]=(64,) , lowercase__ : List[Any]=2 , lowercase__ : int=32 , lowercase__ : Optional[int]="silu" , lowercase__ : List[Any]=True , ) ->Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase : Union[str, Any] = layers_per_block
_UpperCamelCase : Union[str, Any] = torch.nn.Convad(
lowercase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : int = None
_UpperCamelCase : Optional[Any] = nn.ModuleList([] )
# down
_UpperCamelCase : Optional[int] = block_out_channels[0]
for i, down_block_type in enumerate(lowercase__ ):
_UpperCamelCase : int = output_channel
_UpperCamelCase : int = block_out_channels[i]
_UpperCamelCase : Tuple = i == len(lowercase__ ) - 1
_UpperCamelCase : List[Any] = get_down_block(
lowercase__ , num_layers=self.layers_per_block , in_channels=lowercase__ , out_channels=lowercase__ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowercase__ , resnet_groups=lowercase__ , attention_head_dim=lowercase__ , temb_channels=lowercase__ , )
self.down_blocks.append(lowercase__ )
# mid
_UpperCamelCase : Dict = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowercase__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase__ , temb_channels=lowercase__ , )
# out
_UpperCamelCase : List[str] = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowercase__ , eps=1e-6 )
_UpperCamelCase : Union[str, Any] = nn.SiLU()
_UpperCamelCase : Tuple = 2 * out_channels if double_z else out_channels
_UpperCamelCase : List[Any] = nn.Convad(block_out_channels[-1] , lowercase__ , 3 , padding=1 )
_UpperCamelCase : Tuple = False
def snake_case__ ( self : str , lowercase__ : Any ) ->Any:
'''simple docstring'''
_UpperCamelCase : Dict = x
_UpperCamelCase : str = self.conv_in(lowercase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase__ : Any ):
def custom_forward(*lowercase__ : List[Any] ):
return module(*lowercase__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
_UpperCamelCase : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase__ ) , lowercase__ , use_reentrant=lowercase__ )
# middle
_UpperCamelCase : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase__ , use_reentrant=lowercase__ )
else:
for down_block in self.down_blocks:
_UpperCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) , lowercase__ )
# middle
_UpperCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowercase__ )
else:
# down
for down_block in self.down_blocks:
_UpperCamelCase : Tuple = down_block(lowercase__ )
# middle
_UpperCamelCase : Optional[int] = self.mid_block(lowercase__ )
# post-process
_UpperCamelCase : List[Any] = self.conv_norm_out(lowercase__ )
_UpperCamelCase : Tuple = self.conv_act(lowercase__ )
_UpperCamelCase : int = self.conv_out(lowercase__ )
return sample
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , lowercase__ : str=3 , lowercase__ : List[Any]=3 , lowercase__ : Optional[int]=("UpDecoderBlock2D",) , lowercase__ : Any=(64,) , lowercase__ : str=2 , lowercase__ : List[Any]=32 , lowercase__ : Optional[int]="silu" , lowercase__ : List[str]="group" , ) ->Any:
'''simple docstring'''
super().__init__()
_UpperCamelCase : List[Any] = layers_per_block
_UpperCamelCase : Any = nn.Convad(
lowercase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = nn.ModuleList([] )
_UpperCamelCase : Optional[int] = in_channels if norm_type == "spatial" else None
# mid
_UpperCamelCase : int = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowercase__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowercase__ , temb_channels=lowercase__ , )
# up
_UpperCamelCase : Dict = list(reversed(lowercase__ ) )
_UpperCamelCase : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowercase__ ):
_UpperCamelCase : Union[str, Any] = output_channel
_UpperCamelCase : Dict = reversed_block_out_channels[i]
_UpperCamelCase : List[str] = i == len(lowercase__ ) - 1
_UpperCamelCase : Tuple = get_up_block(
lowercase__ , num_layers=self.layers_per_block + 1 , in_channels=lowercase__ , out_channels=lowercase__ , prev_output_channel=lowercase__ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowercase__ , resnet_groups=lowercase__ , attention_head_dim=lowercase__ , temb_channels=lowercase__ , resnet_time_scale_shift=lowercase__ , )
self.up_blocks.append(lowercase__ )
_UpperCamelCase : Any = output_channel
# out
if norm_type == "spatial":
_UpperCamelCase : Any = SpatialNorm(block_out_channels[0] , lowercase__ )
else:
_UpperCamelCase : int = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowercase__ , eps=1e-6 )
_UpperCamelCase : Tuple = nn.SiLU()
_UpperCamelCase : Tuple = nn.Convad(block_out_channels[0] , lowercase__ , 3 , padding=1 )
_UpperCamelCase : Optional[Any] = False
def snake_case__ ( self : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any]=None ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : str = z
_UpperCamelCase : str = self.conv_in(lowercase__ )
_UpperCamelCase : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowercase__ : Optional[int] ):
def custom_forward(*lowercase__ : List[str] ):
return module(*lowercase__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
_UpperCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase__ , lowercase__ , use_reentrant=lowercase__ )
_UpperCamelCase : Union[str, Any] = sample.to(lowercase__ )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowercase__ ) , lowercase__ , lowercase__ , use_reentrant=lowercase__ )
else:
# middle
_UpperCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowercase__ , lowercase__ )
_UpperCamelCase : int = sample.to(lowercase__ )
# up
for up_block in self.up_blocks:
_UpperCamelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) , lowercase__ , lowercase__ )
else:
# middle
_UpperCamelCase : str = self.mid_block(lowercase__ , lowercase__ )
_UpperCamelCase : int = sample.to(lowercase__ )
# up
for up_block in self.up_blocks:
_UpperCamelCase : str = up_block(lowercase__ , lowercase__ )
# post-process
if latent_embeds is None:
_UpperCamelCase : Union[str, Any] = self.conv_norm_out(lowercase__ )
else:
_UpperCamelCase : Any = self.conv_norm_out(lowercase__ , lowercase__ )
_UpperCamelCase : List[str] = self.conv_act(lowercase__ )
_UpperCamelCase : Dict = self.conv_out(lowercase__ )
return sample
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , lowercase__ : str , lowercase__ : Tuple , lowercase__ : Optional[Any] , lowercase__ : Tuple=None , lowercase__ : Dict="random" , lowercase__ : List[Any]=False , lowercase__ : List[Any]=True ) ->Optional[int]:
'''simple docstring'''
super().__init__()
_UpperCamelCase : List[Any] = n_e
_UpperCamelCase : List[str] = vq_embed_dim
_UpperCamelCase : List[str] = beta
_UpperCamelCase : Any = legacy
_UpperCamelCase : Optional[int] = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
_UpperCamelCase : str = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
_UpperCamelCase : List[Any] = self.used.shape[0]
_UpperCamelCase : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
_UpperCamelCase : Any = self.re_embed
_UpperCamelCase : Any = self.re_embed + 1
print(
f'''Remapping {self.n_e} indices to {self.re_embed} indices. '''
f'''Using {self.unknown_index} for unknown indices.''' )
else:
_UpperCamelCase : Optional[Any] = n_e
_UpperCamelCase : Optional[Any] = sane_index_shape
def snake_case__ ( self : str , lowercase__ : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : str = inds.shape
assert len(lowercase__ ) > 1
_UpperCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Tuple = self.used.to(lowercase__ )
_UpperCamelCase : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
_UpperCamelCase : Optional[Any] = match.argmax(-1 )
_UpperCamelCase : int = match.sum(2 ) < 1
if self.unknown_index == "random":
_UpperCamelCase : int = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
_UpperCamelCase : Optional[int] = self.unknown_index
return new.reshape(lowercase__ )
def snake_case__ ( self : Any , lowercase__ : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCamelCase : Tuple = inds.shape
assert len(lowercase__ ) > 1
_UpperCamelCase : Dict = inds.reshape(ishape[0] , -1 )
_UpperCamelCase : Optional[Any] = self.used.to(lowercase__ )
if self.re_embed > self.used.shape[0]: # extra token
_UpperCamelCase : int = 0 # simply set to zero
_UpperCamelCase : Tuple = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowercase__ )
return back.reshape(lowercase__ )
def snake_case__ ( self : List[Any] , lowercase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCamelCase : Any = z.permute(0 , 2 , 3 , 1 ).contiguous()
_UpperCamelCase : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
_UpperCamelCase : Optional[Any] = torch.argmin(torch.cdist(lowercase__ , self.embedding.weight ) , dim=1 )
_UpperCamelCase : List[str] = self.embedding(lowercase__ ).view(z.shape )
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Tuple = None
# compute loss for embedding
if not self.legacy:
_UpperCamelCase : Any = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
_UpperCamelCase : str = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
_UpperCamelCase : List[Any] = z + (z_q - z).detach()
# reshape back to match original input shape
_UpperCamelCase : List[Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
_UpperCamelCase : str = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
_UpperCamelCase : int = self.remap_to_used(lowercase__ )
_UpperCamelCase : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
_UpperCamelCase : Tuple = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case__ ( self : Optional[Any] , lowercase__ : List[str] , lowercase__ : List[str] ) ->Dict:
'''simple docstring'''
if self.remap is not None:
_UpperCamelCase : Tuple = indices.reshape(shape[0] , -1 ) # add batch axis
_UpperCamelCase : str = self.unmap_to_all(lowercase__ )
_UpperCamelCase : str = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
_UpperCamelCase : Dict = self.embedding(lowercase__ )
if shape is not None:
_UpperCamelCase : List[Any] = z_q.view(lowercase__ )
# reshape back to match original input shape
_UpperCamelCase : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict , lowercase__ : str , lowercase__ : List[Any]=False ) ->Any:
'''simple docstring'''
_UpperCamelCase : Tuple = parameters
_UpperCamelCase , _UpperCamelCase : Tuple = torch.chunk(lowercase__ , 2 , dim=1 )
_UpperCamelCase : str = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
_UpperCamelCase : str = deterministic
_UpperCamelCase : Optional[Any] = torch.exp(0.5 * self.logvar )
_UpperCamelCase : Optional[Any] = torch.exp(self.logvar )
if self.deterministic:
_UpperCamelCase : Optional[Any] = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case__ ( self : Any , lowercase__ : Optional[torch.Generator] = None ) ->torch.FloatTensor:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = randn_tensor(
self.mean.shape , generator=lowercase__ , device=self.parameters.device , dtype=self.parameters.dtype )
_UpperCamelCase : Optional[int] = self.mean + self.std * sample
return x
def snake_case__ ( self : str , lowercase__ : Optional[int]=None ) ->List[str]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case__ ( self : List[Any] , lowercase__ : str , lowercase__ : List[Any]=[1, 2, 3] ) ->Dict:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
_UpperCamelCase : Optional[Any] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowercase__ )
def snake_case__ ( self : List[Any] ) ->str:
'''simple docstring'''
return self.mean
| 204 | 1 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
def merge(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(a_ ) <= 1:
return collection
UpperCamelCase : Union[str, Any] = len(a_ ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ : str = input("""Enter numbers separated by a comma:\n""").strip()
__magic_name__ : List[str] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 102 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = """▁"""
A__ = {"""vocab_file""": """sentencepiece.bpe.model"""}
A__ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
A__ = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self : Tuple , __snake_case : Any , __snake_case : str="<s>" , __snake_case : Dict="</s>" , __snake_case : List[Any]="</s>" , __snake_case : str="<s>" , __snake_case : Tuple="<unk>" , __snake_case : int="<pad>" , __snake_case : List[str]="<mask>" , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Optional[int] , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase :int = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
lowerCamelCase :str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
lowerCamelCase :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
lowerCamelCase :List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase :Tuple = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase :Tuple = 1
lowerCamelCase :Dict = len(self.sp_model ) + self.fairseq_offset
lowerCamelCase :Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
lowerCamelCase :Optional[Any] = self.__dict__.copy()
lowerCamelCase :int = None
lowerCamelCase :List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , __snake_case : Optional[Any] ):
lowerCamelCase :str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase :List[str] = {}
lowerCamelCase :str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case ( self : Union[str, Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase :Optional[int] = [self.cls_token_id]
lowerCamelCase :Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def snake_case ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
lowerCamelCase :Dict = [self.sep_token_id]
lowerCamelCase :int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case ( self : int ):
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def snake_case ( self : Optional[int] ):
lowerCamelCase :int = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case ( self : List[str] , __snake_case : str ):
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def snake_case ( self : Tuple , __snake_case : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase :Dict = self.sp_model.PieceToId(__snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case ( self : Any , __snake_case : Any ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case ( self : Dict , __snake_case : List[str] ):
lowerCamelCase :Optional[Any] = ''''''.join(__snake_case ).replace(__snake_case , ''' ''' ).strip()
return out_string
def snake_case ( self : Optional[Any] , __snake_case : str , __snake_case : Optional[str] = None ):
if not os.path.isdir(__snake_case ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCamelCase :Tuple = os.path.join(
__snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , '''wb''' ) as fi:
lowerCamelCase :Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 166 | 0 |
from manim import *
class snake_case_ ( lowerCAmelCase ):
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE_ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE_ : Dict = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE_ : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : List[str] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : int = Text('CPU' , font_size=24 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE_ : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Text('GPU' , font_size=24 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : str = Text('Model' , font_size=24 )
SCREAMING_SNAKE_CASE_ : int = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Any = []
for i, rect in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 )
target.move_to(__lowerCAmelCase )
model_arr.append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : Dict = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_ : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : Tuple = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : List[str] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_ : str = Text('Disk' , font_size=24 )
SCREAMING_SNAKE_CASE_ : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Dict = Square(0.3 )
input.set_fill(__lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 )
self.play(Write(__lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(__lowerCAmelCase ) )
self.play(FadeOut(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : str = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE_ : List[Any] = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
SCREAMING_SNAKE_CASE_ : Any = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE_ : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE_ : Any = AnimationGroup(
FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE_ : Tuple = 0.7
self.play(
Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = a_c
SCREAMING_SNAKE_CASE_ : str = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE_ : int = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) )
self.wait()
| 311 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCAmelCase__: Optional[int] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase__: Dict = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowerCAmelCase__: Any = spec.loader.load_module()
lowerCAmelCase__: Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCAmelCase__: Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowerCAmelCase__: Dict = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def __SCREAMING_SNAKE_CASE ( ) -> int:
SCREAMING_SNAKE_CASE_ : List[str] = []
for config_class in list(CONFIG_MAPPING.values() ):
SCREAMING_SNAKE_CASE_ : Any = False
# source code of `config_class`
SCREAMING_SNAKE_CASE_ : Optional[Any] = inspect.getsource(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = _re_checkpoint.findall(SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
SCREAMING_SNAKE_CASE_ : Tuple = True
break
SCREAMING_SNAKE_CASE_ : Any = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE_ : str = '\n'.join(sorted(SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 311 | 1 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def _lowerCAmelCase ( lowercase ) -> str:
if not sentence:
return ""
__lowerCAmelCase = dict(zip(lowercase , lowercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 689 |
'''simple docstring'''
from collections import deque
def _lowerCAmelCase ( lowercase ) -> Dict:
__lowerCAmelCase = len(lowercase )
__lowerCAmelCase = deque()
__lowerCAmelCase = [False for _ in range(lowercase )]
__lowerCAmelCase = [-1 for _ in range(lowercase )]
__lowerCAmelCase = index_of[:]
def strong_connect(lowercase , lowercase , lowercase ):
__lowerCAmelCase = index # the number when this node is seen
__lowerCAmelCase = index # lowest rank node reachable from here
index += 1
stack.append(lowercase )
__lowerCAmelCase = True
for w in g[v]:
if index_of[w] == -1:
__lowerCAmelCase = strong_connect(lowercase , lowercase , lowercase )
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
__lowerCAmelCase = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
__lowerCAmelCase = []
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
while w != v:
__lowerCAmelCase = stack.pop()
__lowerCAmelCase = False
component.append(lowercase )
components.append(lowercase )
return index
__lowerCAmelCase = []
for v in range(lowercase ):
if index_of[v] == -1:
strong_connect(lowercase , 0 , lowercase )
return components
def _lowerCAmelCase ( lowercase , lowercase ) -> str:
__lowerCAmelCase = [[] for _ in range(lowercase )]
for u, v in edges:
g[u].append(lowercase )
return g
if __name__ == "__main__":
# Test
_a : Any = 7
_a : Tuple = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_a : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_a : Optional[Any] = [(u, v) for u, v in zip(source, target)]
_a : Optional[int] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 689 | 1 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__UpperCamelCase : int = TypeVar("T")
def __UpperCAmelCase ( _snake_case : int ):
return (position - 1) // 2
def __UpperCAmelCase ( _snake_case : int ):
return (2 * position) + 1
def __UpperCAmelCase ( _snake_case : int ):
return (2 * position) + 2
class UpperCAmelCase_ ( Generic[T] ):
def __init__( self : Dict ) -> None:
_lowercase = []
_lowercase = {}
_lowercase = 0
def __len__( self : Optional[Any] ) -> int:
return self.elements
def __repr__( self : Tuple ) -> str:
return str(self.heap )
def _lowerCamelCase ( self : List[str] ) -> bool:
# Check if the priority queue is empty
return self.elements == 0
def _lowerCamelCase ( self : Optional[int] , _lowercase : T , _lowercase : int ) -> None:
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
_lowercase = self.elements
self.elements += 1
self._bubble_up(_lowercase )
def _lowerCamelCase ( self : Optional[Any] ) -> T:
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_lowercase , _lowercase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_lowercase , _lowercase = self.heap[0]
self._bubble_down(_lowercase )
return elem
def _lowerCamelCase ( self : Optional[Any] , _lowercase : T , _lowercase : int ) -> None:
# Update the weight of the given key
_lowercase = self.position_map[elem]
_lowercase = (elem, weight)
if position > 0:
_lowercase = get_parent_position(_lowercase )
_lowercase , _lowercase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_lowercase )
else:
self._bubble_down(_lowercase )
else:
self._bubble_down(_lowercase )
def _lowerCamelCase ( self : int , _lowercase : T ) -> None:
# Place a node at the proper position (upward movement) [to be used internally
# only]
_lowercase = self.position_map[elem]
if curr_pos == 0:
return None
_lowercase = get_parent_position(_lowercase )
_lowercase , _lowercase = self.heap[curr_pos]
_lowercase , _lowercase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_up(_lowercase )
return None
def _lowerCamelCase ( self : Union[str, Any] , _lowercase : T ) -> None:
# Place a node at the proper position (downward movement) [to be used
# internally only]
_lowercase = self.position_map[elem]
_lowercase , _lowercase = self.heap[curr_pos]
_lowercase = get_child_left_position(_lowercase )
_lowercase = get_child_right_position(_lowercase )
if child_left_position < self.elements and child_right_position < self.elements:
_lowercase , _lowercase = self.heap[child_left_position]
_lowercase , _lowercase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_down(_lowercase )
if child_left_position < self.elements:
_lowercase , _lowercase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_down(_lowercase )
else:
return None
if child_right_position < self.elements:
_lowercase , _lowercase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_lowercase , _lowercase )
return self._bubble_down(_lowercase )
return None
def _lowerCamelCase ( self : List[Any] , _lowercase : int , _lowercase : int ) -> None:
# Swap the nodes at the given positions
_lowercase = self.heap[nodea_pos][0]
_lowercase = self.heap[nodea_pos][0]
_lowercase , _lowercase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_lowercase = nodea_pos
_lowercase = nodea_pos
class UpperCAmelCase_ ( Generic[T] ):
def __init__( self : Union[str, Any] ) -> None:
_lowercase = {}
_lowercase = 0
def __repr__( self : List[Any] ) -> str:
return str(self.connections )
def __len__( self : Dict ) -> int:
return self.nodes
def _lowerCamelCase ( self : int , _lowercase : T ) -> None:
# Add a node in the graph if it is not in the graph
if node not in self.connections:
_lowercase = {}
self.nodes += 1
def _lowerCamelCase ( self : List[Any] , _lowercase : T , _lowercase : T , _lowercase : int ) -> None:
# Add an edge between 2 nodes in the graph
self.add_node(_lowercase )
self.add_node(_lowercase )
_lowercase = weight
_lowercase = weight
def __UpperCAmelCase ( _snake_case : GraphUndirectedWeighted[T], ):
_lowercase = {node: maxsize for node in graph.connections}
_lowercase = {node: None for node in graph.connections}
_lowercase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_snake_case, _snake_case )
if priority_queue.is_empty():
return dist, parent
# initialization
_lowercase = priority_queue.extract_min()
_lowercase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case, dist[neighbour] )
_lowercase = node
# running prim's algorithm
while not priority_queue.is_empty():
_lowercase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_lowercase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_snake_case, dist[neighbour] )
_lowercase = node
return dist, parent | 717 | """simple docstring"""
def __UpperCAmelCase ( _snake_case : list ):
_lowercase = len(_snake_case )
for _ in range(_snake_case ):
for i in range(_ % 2, arr_size - 1, 2 ):
if arr[i + 1] < arr[i]:
_lowercase , _lowercase = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__UpperCamelCase : str = list(range(1_0, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''') | 227 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case : Any = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base" )
__snake_case : Tuple = {
'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
__snake_case : Dict = model(_a )['last_hidden_state']
__snake_case : int = tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _a )
# compare the actual values for a slice.
__snake_case : List[Any] = tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 243 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = ['''keras_nlp''']
def __init__( self : Union[str, Any] ,*_a : List[Any] ,**_a : int ):
'''simple docstring'''
requires_backends(self ,['keras_nlp'] )
| 229 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a : Any = 16
a : Any = 32
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase = 16, __UpperCAmelCase = "bert-base-cased" ) -> List[str]:
'''simple docstring'''
snake_case_ = AutoTokenizer.from_pretrained(__UpperCAmelCase )
snake_case_ = load_dataset('''glue''', '''mrpc''' )
def tokenize_function(__UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=__UpperCAmelCase, max_length=__UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case_ = datasets.map(
__UpperCAmelCase, batched=__UpperCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=__UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ = tokenized_datasets.rename_column('''label''', '''labels''' )
def collate_fn(__UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase, padding='''max_length''', max_length=128, return_tensors='''pt''' )
return tokenizer.pad(__UpperCAmelCase, padding='''longest''', return_tensors='''pt''' )
# Instantiate dataloaders.
snake_case_ = DataLoader(
tokenized_datasets['''train'''], shuffle=__UpperCAmelCase, collate_fn=__UpperCAmelCase, batch_size=__UpperCAmelCase )
snake_case_ = DataLoader(
tokenized_datasets['''validation'''], shuffle=__UpperCAmelCase, collate_fn=__UpperCAmelCase, batch_size=__UpperCAmelCase )
return train_dataloader, eval_dataloader
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ = config['''lr''']
snake_case_ = int(config['''num_epochs'''] )
snake_case_ = int(config['''seed'''] )
snake_case_ = int(config['''batch_size'''] )
snake_case_ = args.model_name_or_path
set_seed(__UpperCAmelCase )
snake_case_ ,snake_case_ = get_dataloaders(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ = AutoModelForSequenceClassification.from_pretrained(__UpperCAmelCase, return_dict=__UpperCAmelCase )
# Instantiate optimizer
snake_case_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case_ = optimizer_cls(params=model.parameters(), lr=__UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
snake_case_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
snake_case_ = 1
snake_case_ = (len(__UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case_ = get_linear_schedule_with_warmup(
optimizer=__UpperCAmelCase, num_warmup_steps=0, num_training_steps=__UpperCAmelCase, )
else:
snake_case_ = DummyScheduler(__UpperCAmelCase, total_num_steps=__UpperCAmelCase, warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = accelerator.prepare(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
snake_case_ = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case_ = 0
# Now we train the model
snake_case_ = evaluate.load('''glue''', '''mrpc''' )
snake_case_ = 0
snake_case_ = {}
for epoch in range(__UpperCAmelCase, __UpperCAmelCase ):
model.train()
for step, batch in enumerate(__UpperCAmelCase ):
snake_case_ = model(**__UpperCAmelCase )
snake_case_ = outputs.loss
snake_case_ = loss / gradient_accumulation_steps
accelerator.backward(__UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case_ = 0
for step, batch in enumerate(__UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ = model(**__UpperCAmelCase )
snake_case_ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
snake_case_ ,snake_case_ = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__UpperCAmelCase ) - 1:
snake_case_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case_ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__UpperCAmelCase, references=__UpperCAmelCase, )
snake_case_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:", __UpperCAmelCase )
snake_case_ = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
snake_case_ = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir, '''all_results.json''' ), '''w''' ) as f:
json.dump(__UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
snake_case_ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''', type=__UpperCAmelCase, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=__UpperCAmelCase, )
parser.add_argument(
'''--output_dir''', type=__UpperCAmelCase, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
parser.add_argument(
'''--performance_lower_bound''', type=__UpperCAmelCase, default=__UpperCAmelCase, help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''', )
parser.add_argument(
'''--num_epochs''', type=__UpperCAmelCase, default=3, help='''Number of train epochs.''', )
snake_case_ = parser.parse_args()
snake_case_ = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCAmelCase, __UpperCAmelCase )
if __name__ == "__main__":
main()
| 593 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a ( unittest.TestCase , _lowerCamelCase ):
def A_ ( self : List[str] ):
snake_case_ = load_tool('''text-to-speech''' )
self.tool.setup()
def A_ ( self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
snake_case_ = self.tool('''hey''' )
snake_case_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
def A_ ( self : List[str] ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
snake_case_ = self.tool('''hey''' )
snake_case_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
| 593 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Optional[Any] =logging.get_logger(__name__)
__snake_case :Dict ={
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCAmelCase__ ( lowercase_ ):
A_ : Union[str, Any] = 'realm'
def __init__( self : str , __UpperCamelCase : List[Any]=30_522 , __UpperCamelCase : Dict=768 , __UpperCamelCase : Union[str, Any]=128 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : Optional[int]=8 , __UpperCamelCase : str=3_072 , __UpperCamelCase : List[str]="gelu_new" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=512 , __UpperCamelCase : int=2 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : List[Any]=1e-12 , __UpperCamelCase : str=256 , __UpperCamelCase : List[str]=10 , __UpperCamelCase : int=1e-3 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Tuple=320 , __UpperCamelCase : List[str]=13_353_718 , __UpperCamelCase : Tuple=5_000 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Union[str, Any]=2 , **__UpperCamelCase : List[Any] , ) -> List[Any]:
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
# Common config
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = retriever_proj_size
A = num_hidden_layers
A = num_attention_heads
A = num_candidates
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = type_vocab_size
A = layer_norm_eps
# Reader config
A = span_hidden_size
A = max_span_width
A = reader_layer_norm_eps
A = reader_beam_size
A = reader_seq_len
# Retrieval config
A = num_block_records
A = searcher_beam_size | 106 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__a : Union[str, Any] = logging.get_logger(__name__)
__a : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__a : Dict = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
__a : List[str] = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE = RobertaTokenizer
def __init__( self : List[Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Union[str, Any]="replace" , UpperCamelCase_ : Any="<s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Optional[int]="<pad>" , UpperCamelCase_ : str="<mask>" , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : Optional[Any]=True , **UpperCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
__A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space:
__A = getattr(UpperCamelCase_ , pre_tok_state.pop("""type""" ) )
__A = add_prefix_space
__A = pre_tok_class(**UpperCamelCase_ )
__A = add_prefix_space
__A = """post_processor"""
__A = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
if tokenizer_component_instance:
__A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__A = tuple(state["""sep"""] )
if "cls" in state:
__A = tuple(state["""cls"""] )
__A = False
if state.get("""add_prefix_space""" , UpperCamelCase_ ) != add_prefix_space:
__A = add_prefix_space
__A = True
if state.get("""trim_offsets""" , UpperCamelCase_ ) != trim_offsets:
__A = trim_offsets
__A = True
if changes_to_apply:
__A = getattr(UpperCamelCase_ , state.pop("""type""" ) )
__A = component_class(**UpperCamelCase_ )
setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
@property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase_ ( self : str , UpperCamelCase_ : Tuple ):
"""simple docstring"""
__A = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value
__A = value
def lowerCAmelCase_ ( self : Tuple , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : Dict ):
"""simple docstring"""
__A = kwargs.get("""is_split_into_words""" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : Tuple ):
"""simple docstring"""
__A = kwargs.get("""is_split_into_words""" , UpperCamelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ):
"""simple docstring"""
__A = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : int=None ):
"""simple docstring"""
__A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ):
"""simple docstring"""
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 637 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase_ : Tuple = _symbol_database.Default()
lowerCAmelCase_ : Optional[int] = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
lowerCAmelCase_ : Tuple = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : List[Any] = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase_ : Tuple = 45
lowerCAmelCase_ : List[Any] = 1581
lowerCAmelCase_ : Tuple = 1517
lowerCAmelCase_ : List[Any] = 1570
lowerCAmelCase_ : str = 1584
lowerCAmelCase_ : Dict = 1793
lowerCAmelCase_ : List[Any] = 1795
lowerCAmelCase_ : Any = 1916
lowerCAmelCase_ : Dict = 1864
lowerCAmelCase_ : Dict = 1905
lowerCAmelCase_ : Optional[Any] = 1919
lowerCAmelCase_ : str = 2429
lowerCAmelCase_ : Union[str, Any] = 2208
lowerCAmelCase_ : Union[str, Any] = 2418
lowerCAmelCase_ : Union[str, Any] = 2323
lowerCAmelCase_ : Union[str, Any] = 2407
# @@protoc_insertion_point(module_scope)
| 464 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ : Tuple = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Dict = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : str = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 464 | 1 |
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
UpperCAmelCase_ : List[Any] = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
UpperCAmelCase_ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCAmelCase_ : Union[str, Any] = dict(zip(vocab, range(len(vocab))))
UpperCAmelCase_ : List[Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Union[str, Any] = Path(tmpdirname)
UpperCAmelCase_ : Any = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
UpperCAmelCase_ : Dict = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
UpperCAmelCase_ : Tuple = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
UpperCAmelCase_ : Dict = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
UpperCAmelCase_ : Tuple = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
UpperCAmelCase_ : Tuple = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
UpperCAmelCase_ : str = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
UpperCAmelCase_ : str = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 17 |
'''simple docstring'''
from math import factorial
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : float ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(lowerCamelCase ,lowerCamelCase ) or not isinstance(lowerCamelCase ,lowerCamelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_A : str = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_A : Any = float(factorial(lowerCamelCase ) )
coefficient /= factorial(lowerCamelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 128 | 0 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class _snake_case ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self: Optional[int] ,*lowerCamelCase_: Any ,**lowerCamelCase_: int ) -> Any:
warnings.warn(
"""The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use MobileViTImageProcessor instead.""" ,lowerCamelCase_ ,)
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
| 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase_ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : int , a_ : List[str] , ):
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : Union[str, Any] = 13
lowerCAmelCase_ : List[Any] = 7
lowerCAmelCase_ : Tuple = 30
lowerCAmelCase_ : int = self.seq_length + self.mem_len
lowerCAmelCase_ : Optional[int] = 15
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : str = 99
lowerCAmelCase_ : str = [10, 50, 80]
lowerCAmelCase_ : Optional[Any] = 32
lowerCAmelCase_ : Optional[int] = 32
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : Tuple = 8
lowerCAmelCase_ : int = 1_28
lowerCAmelCase_ : int = 2
lowerCAmelCase_ : Optional[int] = 2
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Union[str, Any] = 1
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : Tuple = 3
lowerCAmelCase_ : Any = self.vocab_size - 1
lowerCAmelCase_ : Optional[Any] = 0.01
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Dict = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : List[str] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase ( self : Dict ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCamelCase ( self : Optional[int] , a_ : Optional[Any] , a_ : Optional[int] , a_ : Tuple , a_ : str ):
lowerCAmelCase_ : List[Any] = TFTransfoXLModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ : str = model(a_ ).to_tuple()
lowerCAmelCase_ : Optional[Any] = {"input_ids": input_ids_a, "mems": mems_a}
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = model(a_ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase ( self : int , a_ : Tuple , a_ : Optional[int] , a_ : Dict , a_ : Tuple ):
lowerCAmelCase_ : Optional[int] = TFTransfoXLLMHeadModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ : int = model(a_ ).to_tuple()
lowerCAmelCase_ : List[str] = {"input_ids": input_ids_a, "labels": lm_labels}
lowerCAmelCase_ , lowerCAmelCase_ : Any = model(a_ ).to_tuple()
lowerCAmelCase_ , lowerCAmelCase_ : int = model([input_ids_a, mems_a] ).to_tuple()
lowerCAmelCase_ : Optional[int] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
lowerCAmelCase_ , lowerCAmelCase_ : Dict = model(a_ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowerCamelCase ( self : Any , a_ : List[Any] , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any ):
lowerCAmelCase_ : List[str] = TFTransfoXLForSequenceClassification(a_ )
lowerCAmelCase_ : Tuple = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Any = self.prepare_config_and_inputs()
((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) : Tuple = config_and_inputs
lowerCAmelCase_ : List[Any] = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class __lowerCamelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
a_ : int = () if is_tf_available() else ()
a_ : Union[str, Any] = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
a_ : int = False
a_ : Optional[Any] = False
a_ : Dict = False
a_ : Any = False
def lowerCamelCase ( self : Dict , a_ : Optional[int] , a_ : Dict , a_ : int , a_ : str , a_ : Optional[Any] ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : List[Any] = TFTransfoXLModelTester(self )
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=a_ , d_embed=37 )
def lowerCamelCase ( self : List[str] ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : str ):
self.model_tester.set_seed()
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a_ )
def lowerCamelCase ( self : Optional[int] ):
self.model_tester.set_seed()
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a_ )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a_ )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(a_ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCAmelCase_ : str = model.get_output_embeddings()
assert isinstance(a_ , tf.keras.layers.Layer )
lowerCAmelCase_ : Tuple = model.get_bias()
assert name is None
else:
lowerCAmelCase_ : str = model.get_output_embeddings()
assert x is None
lowerCAmelCase_ : Union[str, Any] = model.get_bias()
assert name is None
def lowerCamelCase ( self : Any ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowerCamelCase ( self : Union[str, Any] ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[str] = TFTransfoXLModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def lowerCamelCase ( self : str ):
pass
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Tuple = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
lowerCAmelCase_ : Dict = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCAmelCase_ : Any = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCAmelCase_ : Dict = model.generate(a_ , max_length=2_00 , do_sample=a_ )
self.assertListEqual(output_ids[0].numpy().tolist() , a_ )
| 610 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCamelCase ( A__ , A__ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[Any] , *,
a_ : int = 4 , a_ : int = 7_68 , a_ : int , a_ : int , ):
super().__init__()
lowerCAmelCase_ : List[str] = nn.Parameter(torch.zeros(a_ ) )
# parameters for additional clip time embeddings
lowerCAmelCase_ : Dict = nn.Linear(a_ , a_ )
lowerCAmelCase_ : Union[str, Any] = nn.Linear(a_ , a_ )
# parameters for encoder hidden states
lowerCAmelCase_ : Dict = clip_extra_context_tokens
lowerCAmelCase_ : str = nn.Linear(
a_ , self.clip_extra_context_tokens * cross_attention_dim )
lowerCAmelCase_ : Any = nn.Linear(a_ , a_ )
lowerCAmelCase_ : Dict = nn.LayerNorm(a_ )
def lowerCamelCase ( self : int , *, a_ : Optional[Any] , a_ : Tuple , a_ : Union[str, Any] , a_ : Union[str, Any] ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
lowerCAmelCase_ : Dict = image_embeddings.shape[0]
lowerCAmelCase_ : str = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
lowerCAmelCase_ : List[str] = classifier_free_guidance_embeddings.expand(
a_ , -1 )
lowerCAmelCase_ : int = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
lowerCAmelCase_ : int = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
lowerCAmelCase_ : Optional[int] = self.embedding_proj(a_ )
lowerCAmelCase_ : str = self.clip_image_embeddings_project_to_time_embeddings(a_ )
lowerCAmelCase_ : Union[str, Any] = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
lowerCAmelCase_ : Optional[Any] = self.clip_extra_context_tokens_proj(a_ )
lowerCAmelCase_ : List[str] = clip_extra_context_tokens.reshape(a_ , -1 , self.clip_extra_context_tokens )
lowerCAmelCase_ : List[str] = clip_extra_context_tokens.permute(0 , 2 , 1 )
lowerCAmelCase_ : Dict = self.encoder_hidden_states_proj(a_ )
lowerCAmelCase_ : int = self.text_encoder_hidden_states_norm(a_ )
lowerCAmelCase_ : Any = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 610 | 1 |
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = ["torch", "torchsde"]
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["torch", "torchsde"] )
@classmethod
def A__ ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
@classmethod
def A__ ( cls , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "torchsde"] )
| 601 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = AutoencoderKL
lowerCAmelCase__ = "sample"
lowerCAmelCase__ = 1E-2
@property
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = 4
lowercase_ = 3
lowercase_ = (32, 32)
lowercase_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
return {"sample": image}
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def A__ ( self ) -> List[Any]:
'''simple docstring'''
return (3, 32, 32)
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
lowercase_ = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ) -> Any:
'''simple docstring'''
pass
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.prepare_init_args_and_inputs_for_common()
lowercase_ = self.model_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
assert not model.is_gradient_checkpointing and model.training
lowercase_ = model(**UpperCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowercase_ = torch.randn_like(UpperCAmelCase )
lowercase_ = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase_ = self.model_class(**UpperCAmelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(UpperCAmelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase_ = model_a(**UpperCAmelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowercase_ = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
lowercase_ = dict(model.named_parameters() )
lowercase_ = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(UpperCAmelCase )
lowercase_ = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
lowercase_ = model.to(UpperCAmelCase )
model.eval()
if torch_device == "mps":
lowercase_ = torch.manual_seed(0 )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(0 )
lowercase_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase_ = image.to(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , sample_posterior=UpperCAmelCase , generator=UpperCAmelCase ).sample
lowercase_ = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase_ = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
lowercase_ = torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
lowercase_ = torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
@slow
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return F'gaussian_noise_s={seed}_shape={"_".join([str(UpperCAmelCase ) for s in shape] )}.npy'
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self , UpperCAmelCase=0 , UpperCAmelCase=(4, 3, 512, 512) , UpperCAmelCase=False ) -> str:
'''simple docstring'''
lowercase_ = torch.floataa if fpaa else torch.floataa
lowercase_ = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase , UpperCAmelCase ) ) ).to(UpperCAmelCase ).to(UpperCAmelCase )
return image
def A__ ( self , UpperCAmelCase="CompVis/stable-diffusion-v1-4" , UpperCAmelCase=False ) -> List[str]:
'''simple docstring'''
lowercase_ = "fp16" if fpaa else None
lowercase_ = torch.floataa if fpaa else torch.floataa
lowercase_ = AutoencoderKL.from_pretrained(
UpperCAmelCase , subfolder="vae" , torch_dtype=UpperCAmelCase , revision=UpperCAmelCase , )
model.to(UpperCAmelCase ).eval()
return model
def A__ ( self , UpperCAmelCase=0 ) -> int:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(UpperCAmelCase )
return torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase )
lowercase_ = self.get_generator(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , generator=UpperCAmelCase , sample_posterior=UpperCAmelCase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCAmelCase )
lowercase_ = self.get_sd_image(UpperCAmelCase , fpaa=UpperCAmelCase )
lowercase_ = self.get_generator(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase , generator=UpperCAmelCase , sample_posterior=UpperCAmelCase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model(UpperCAmelCase ).sample
assert sample.shape == image.shape
lowercase_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCAmelCase )
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def A__ ( self , UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model(fpaa=UpperCAmelCase )
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=UpperCAmelCase )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def A__ ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ = model.decode(UpperCAmelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.get_sd_vae_model()
lowercase_ = self.get_sd_image(UpperCAmelCase )
lowercase_ = self.get_generator(UpperCAmelCase )
with torch.no_grad():
lowercase_ = model.encode(UpperCAmelCase ).latent_dist
lowercase_ = dist.sample(generator=UpperCAmelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase_ = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase_ = torch.tensor(UpperCAmelCase )
lowercase_ = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(UpperCAmelCase , UpperCAmelCase , atol=UpperCAmelCase )
| 601 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Tuple = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
"""simple docstring"""
if len(lowercase_ ) <= 1:
return [tuple(lowercase_ )]
A__ = []
def generate(lowercase_ , lowercase_ ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , lowercase_ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
A__ , A__ = arr[k - 1], arr[i]
else: # k is odd
A__ , A__ = arr[k - 1], arr[0]
generate(k - 1 , lowercase_ )
generate(len(lowercase_ ) , lowercase_ )
return res
if __name__ == "__main__":
_lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCamelCase : str = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 87 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
lowerCamelCase__ = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def A(__a: Tuple=True ):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__lowercase ) )
class __magic_name__ (__lowercase ):
lowerCamelCase__ = None
lowerCamelCase__ = None
def __a ( self , _a , _a ) -> int:
with TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = dataset_module_factory(_a , cache_dir=_a )
lowerCAmelCase_ = import_main_class(dataset_module.module_path , dataset=_a )
lowerCAmelCase_ = builder_cls(
cache_dir=_a , config_name=_a , hash=dataset_module.hash , )
lowerCAmelCase_ = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=_a ).replace(os.sep , "/" ),
config.DATASET_INFO_FILENAME,
] )
lowerCAmelCase_ = cached_path(_a , cache_dir=_a )
self.assertTrue(os.path.exists(_a ) )
@pytest.mark.integration
def A(__a: Any ):
lowerCAmelCase_ = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
lowerCAmelCase_ = dataset_module_factory("wikipedia" , cache_dir=__a )
lowerCAmelCase_ = import_main_class(dataset_module.module_path )
lowerCAmelCase_ = builder_cls(
cache_dir=__a , config_name="20220301.frr" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
lowerCAmelCase_ = None
builder_instance.download_and_prepare()
lowerCAmelCase_ = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def A(__a: Optional[int] ):
lowerCAmelCase_ = dataset_module_factory("wikipedia" , cache_dir=__a )
lowerCAmelCase_ = import_main_class(dataset_module.module_path , dataset=__a )
lowerCAmelCase_ = builder_cls(
cache_dir=__a , config_name="20220301.frr" , hash=dataset_module.hash , )
lowerCAmelCase_ = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__a , __a )
assert "train" in ds
assert isinstance(ds["train"] , __a )
assert next(iter(ds["train"] ) )
| 226 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase__ = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowerCamelCase__ = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
lowerCamelCase__ = '''▁'''
class __magic_name__ (__lowercase ):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowerCAmelCase_ = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
lowerCAmelCase_ = do_lower_case
lowerCAmelCase_ = remove_space
lowerCAmelCase_ = keep_accents
lowerCAmelCase_ = vocab_file
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __a ( self ) -> Dict:
return len(self.sp_model )
def __a ( self ) -> Union[str, Any]:
lowerCAmelCase_ = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Tuple:
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
return state
def __setstate__( self , _a ) -> Tuple:
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self , _a ) -> str:
if self.remove_space:
lowerCAmelCase_ = " ".join(inputs.strip().split() )
else:
lowerCAmelCase_ = inputs
lowerCAmelCase_ = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCAmelCase_ = unicodedata.normalize("NFKD" , _a )
lowerCAmelCase_ = "".join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
lowerCAmelCase_ = outputs.lower()
return outputs
def __a ( self , _a ) -> List[str]:
lowerCAmelCase_ = self.preprocess_text(_a )
lowerCAmelCase_ = self.sp_model.encode(_a , out_type=_a )
lowerCAmelCase_ = []
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ = cur_pieces[1:]
else:
lowerCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def __a ( self , _a ) -> Optional[Any]:
return self.sp_model.PieceToId(_a )
def __a ( self , _a ) -> Optional[int]:
return self.sp_model.IdToPiece(_a )
def __a ( self , _a ) -> Optional[int]:
lowerCAmelCase_ = []
lowerCAmelCase_ = ""
lowerCAmelCase_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
lowerCAmelCase_ = True
lowerCAmelCase_ = []
else:
current_sub_tokens.append(_a )
lowerCAmelCase_ = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __a ( self , _a , _a = None ) -> List[int]:
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , _a , _a = None , _a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def __a ( self , _a , _a = None ) -> List[int]:
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase_ = os.path.join(
_a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , "wb" ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 226 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
lowercase__ :Optional[int] = None
lowercase__ :Any = {
'7B': 1_1_0_0_8,
'13B': 1_3_8_2_4,
'30B': 1_7_9_2_0,
'65B': 2_2_0_1_6,
'70B': 2_8_6_7_2,
}
lowercase__ :str = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_=1 , UpperCAmelCase_=2_56 ) ->str:
"""simple docstring"""
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def lowerCamelCase_ ( UpperCAmelCase_ ) ->List[Any]:
"""simple docstring"""
with open(UpperCAmelCase_ , '''r''' ) as f:
return json.load(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->str:
"""simple docstring"""
with open(UpperCAmelCase_ , '''w''' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=True ) ->List[str]:
"""simple docstring"""
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
__UpperCAmelCase : str = os.path.join(UpperCAmelCase_ , '''tmp''' )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
__UpperCAmelCase : Any = read_json(os.path.join(UpperCAmelCase_ , '''params.json''' ) )
__UpperCAmelCase : Dict = NUM_SHARDS[model_size]
__UpperCAmelCase : Dict = params['''n_layers''']
__UpperCAmelCase : Any = params['''n_heads''']
__UpperCAmelCase : int = n_heads // num_shards
__UpperCAmelCase : Union[str, Any] = params['''dim''']
__UpperCAmelCase : Optional[Any] = dim // n_heads
__UpperCAmelCase : Union[str, Any] = 10_000.0
__UpperCAmelCase : List[str] = 1.0 / (base ** (torch.arange(0 , UpperCAmelCase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
__UpperCAmelCase : Optional[int] = params['''n_kv_heads'''] # for GQA / MQA
__UpperCAmelCase : int = n_heads_per_shard // num_key_value_heads
__UpperCAmelCase : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
__UpperCAmelCase : str = n_heads
__UpperCAmelCase : Union[str, Any] = n_heads_per_shard
__UpperCAmelCase : List[str] = dim
# permute for sliced rotary
def permute(UpperCAmelCase_ , UpperCAmelCase_=n_heads , UpperCAmelCase_=dim , UpperCAmelCase_=dim ):
return w.view(UpperCAmelCase_ , dima // n_heads // 2 , 2 , UpperCAmelCase_ ).transpose(1 , 2 ).reshape(UpperCAmelCase_ , UpperCAmelCase_ )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
__UpperCAmelCase : List[Any] = torch.load(os.path.join(UpperCAmelCase_ , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
__UpperCAmelCase : Union[str, Any] = [
torch.load(os.path.join(UpperCAmelCase_ , f'''consolidated.{i:02d}.pth''' ) , map_location='''cpu''' )
for i in range(UpperCAmelCase_ )
]
__UpperCAmelCase : str = 0
__UpperCAmelCase : Optional[Any] = {'''weight_map''': {}}
for layer_i in range(UpperCAmelCase_ ):
__UpperCAmelCase : str = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCAmelCase : str = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
__UpperCAmelCase : int = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
__UpperCAmelCase : Optional[int] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for i in range(UpperCAmelCase_ )
] , dim=0 , ).reshape(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : Any = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for i in range(UpperCAmelCase_ )
] , dim=0 , ).reshape(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
__UpperCAmelCase : Tuple = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for i in range(UpperCAmelCase_ )
] , dim=0 , ).reshape(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : List[str] = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(UpperCAmelCase_ )] , dim=1 )
__UpperCAmelCase : Tuple = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(UpperCAmelCase_ )] , dim=0 )
__UpperCAmelCase : Dict = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(UpperCAmelCase_ )] , dim=1 )
__UpperCAmelCase : Union[str, Any] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(UpperCAmelCase_ )] , dim=0 )
__UpperCAmelCase : Tuple = inv_freq
for k, v in state_dict.items():
__UpperCAmelCase : Union[str, Any] = filename
param_count += v.numel()
torch.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
__UpperCAmelCase : List[str] = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
__UpperCAmelCase : str = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
__UpperCAmelCase : Any = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(UpperCAmelCase_ )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(UpperCAmelCase_ )] , dim=0 ),
}
for k, v in state_dict.items():
__UpperCAmelCase : List[Any] = filename
param_count += v.numel()
torch.save(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) )
# Write configs
__UpperCAmelCase : str = {'''total_size''': param_count * 2}
write_json(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''pytorch_model.bin.index.json''' ) )
__UpperCAmelCase : Union[str, Any] = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
__UpperCAmelCase : int = params['''multiple_of'''] if '''multiple_of''' in params else 2_56
__UpperCAmelCase : List[Any] = LlamaConfig(
hidden_size=UpperCAmelCase_ , intermediate_size=compute_intermediate_size(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=UpperCAmelCase_ , )
config.save_pretrained(UpperCAmelCase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
__UpperCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(UpperCAmelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=UpperCAmelCase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(UpperCAmelCase_ , safe_serialization=UpperCAmelCase_ )
shutil.rmtree(UpperCAmelCase_ )
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ) ->str:
"""simple docstring"""
__UpperCAmelCase : Tuple = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
__UpperCAmelCase : str = tokenizer_class(UpperCAmelCase_ )
tokenizer.save_pretrained(UpperCAmelCase_ )
def lowerCamelCase_ ( ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
parser.add_argument('''--safe_serialization''' , type=UpperCAmelCase_ , help='''Whether or not to save using `safetensors`.''' )
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
__UpperCAmelCase : Any = os.path.join(args.input_dir , '''tokenizer.model''' )
write_tokenizer(args.output_dir , UpperCAmelCase_ )
if __name__ == "__main__":
main() | 522 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowercase__ :List[Any] = logging.get_logger(__name__)
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__UpperCAmelCase : Any = downstream_dict['''projector.weight''']
__UpperCAmelCase : List[Any] = downstream_dict['''projector.bias''']
__UpperCAmelCase : str = downstream_dict['''model.post_net.linear.weight''']
__UpperCAmelCase : Dict = downstream_dict['''model.post_net.linear.bias''']
return model
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Optional[Any]:
"""simple docstring"""
__UpperCAmelCase : List[Any] = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__UpperCAmelCase : str = downstream_dict['''model.linear.weight''']
__UpperCAmelCase : List[Any] = downstream_dict['''model.linear.bias''']
return model
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Dict:
"""simple docstring"""
__UpperCAmelCase : Tuple = WavaVecaForXVector.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = downstream_dict['''connector.weight''']
__UpperCAmelCase : Optional[Any] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__UpperCAmelCase : List[str] = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__UpperCAmelCase : Optional[int] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__UpperCAmelCase : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__UpperCAmelCase : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__UpperCAmelCase : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__UpperCAmelCase : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__UpperCAmelCase : Optional[Any] = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : str = torch.load(UpperCAmelCase_ , map_location='''cpu''' )
__UpperCAmelCase : List[Any] = checkpoint['''Downstream''']
__UpperCAmelCase : int = WavaVecaConfig.from_pretrained(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ )
__UpperCAmelCase : int = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__UpperCAmelCase : int = convert_classification(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__UpperCAmelCase : Optional[int] = convert_diarization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif arch.endswith('''ForXVector''' ):
__UpperCAmelCase : int = convert_xvector(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__UpperCAmelCase : Tuple = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowercase__ :Any = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 522 | 1 |
import os
import time
import numpy as np
import onnxruntime as ort
UpperCAmelCase = """1"""
UpperCAmelCase = """0"""
UpperCAmelCase = """1"""
UpperCAmelCase = ort.SessionOptions()
UpperCAmelCase = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("""Create inference session...""")
UpperCAmelCase = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""]
UpperCAmelCase = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider)
UpperCAmelCase = ort.RunOptions()
UpperCAmelCase = 128
UpperCAmelCase = 1
UpperCAmelCase = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase = np.ones((batch, sequence), dtype=np.intaa)
UpperCAmelCase = np.ones((batch, sequence), dtype=np.intaa)
print("""Warm up phase...""")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Start inference...""")
UpperCAmelCase = time.time()
UpperCAmelCase = 2000
UpperCAmelCase = {}
for iter in range(max_iters):
UpperCAmelCase = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1000 / max_iters))
| 709 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = "informer"
__snake_case = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "student_t" , _UpperCAmelCase = "nll" , _UpperCAmelCase = 1 , _UpperCAmelCase = None , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 64 , _UpperCAmelCase = 32 , _UpperCAmelCase = 32 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = True , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.05 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 1_00 , _UpperCAmelCase = 0.02 , _UpperCAmelCase=True , _UpperCAmelCase = "prob" , _UpperCAmelCase = 5 , _UpperCAmelCase = True , **_UpperCAmelCase , ):
# time series specific configuration
snake_case_ = prediction_length
snake_case_ = context_length or prediction_length
snake_case_ = distribution_output
snake_case_ = loss
snake_case_ = input_size
snake_case_ = num_time_features
snake_case_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case_ = scaling
snake_case_ = num_dynamic_real_features
snake_case_ = num_static_real_features
snake_case_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(_UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = cardinality
else:
snake_case_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
snake_case_ = embedding_dimension
else:
snake_case_ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case_ = num_parallel_samples
# Transformer architecture configuration
snake_case_ = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case_ = d_model
snake_case_ = encoder_attention_heads
snake_case_ = decoder_attention_heads
snake_case_ = encoder_ffn_dim
snake_case_ = decoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = decoder_layers
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = use_cache
# Informer
snake_case_ = attention_type
snake_case_ = sampling_factor
snake_case_ = distil
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def UpperCamelCase__ ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
) | 531 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class a__ :
def __init__( self : str ,a__ : Optional[Any] ,a__ : Tuple=13 ,a__ : Any=7 ,a__ : Union[str, Any]=6 ,a__ : int=17 ,a__ : List[str]=23 ,a__ : List[str]=11 ,a__ : Optional[Any]=True ,) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase:Optional[int] = parent
_lowerCAmelCase:List[Any] = batch_size
_lowerCAmelCase:Dict = seq_length
_lowerCAmelCase:List[str] = act_dim
_lowerCAmelCase:Optional[Any] = state_dim
_lowerCAmelCase:int = hidden_size
_lowerCAmelCase:int = max_length
_lowerCAmelCase:Union[str, Any] = is_training
def __UpperCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_lowerCAmelCase:str = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
_lowerCAmelCase:List[str] = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
_lowerCAmelCase:Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1))
_lowerCAmelCase:List[str] = floats_tensor((self.batch_size, self.seq_length, 1))
_lowerCAmelCase:Any = ids_tensor((self.batch_size, self.seq_length) ,vocab_size=1000)
_lowerCAmelCase:str = random_attention_mask((self.batch_size, self.seq_length))
_lowerCAmelCase:Dict = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __UpperCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size ,seq_length=self.seq_length ,act_dim=self.act_dim ,state_dim=self.state_dim ,hidden_size=self.hidden_size ,max_length=self.max_length ,)
def __UpperCamelCase ( self : Optional[int] ,a__ : Tuple ,a__ : Dict ,a__ : int ,a__ : Dict ,a__ : Dict ,a__ : Union[str, Any] ,a__ : List[str] ,) -> str:
"""simple docstring"""
_lowerCAmelCase:int = DecisionTransformerModel(config=a__)
model.to(a__)
model.eval()
_lowerCAmelCase:Optional[int] = model(a__ ,a__ ,a__ ,a__ ,a__ ,a__)
self.parent.assertEqual(result.state_preds.shape ,states.shape)
self.parent.assertEqual(result.action_preds.shape ,actions.shape)
self.parent.assertEqual(result.return_preds.shape ,returns_to_go.shape)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.seq_length * 3, self.hidden_size)) # seq length *3 as there are 3 modelities: states, returns and actions
def __UpperCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:Dict = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
):List[Any] = config_and_inputs
_lowerCAmelCase:Tuple = {
'''states''': states,
'''actions''': actions,
'''rewards''': rewards,
'''returns_to_go''': returns_to_go,
'''timesteps''': timesteps,
'''attention_mask''': attention_mask,
}
return config, inputs_dict
@require_torch
class a__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
snake_case__ = (DecisionTransformerModel,) if is_torch_available() else ()
snake_case__ = ()
snake_case__ = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
snake_case__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def __UpperCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase:List[str] = DecisionTransformerModelTester(self)
_lowerCAmelCase:List[Any] = ConfigTester(self ,config_class=a__ ,hidden_size=37)
def __UpperCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_lowerCAmelCase:Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__)
@slow
def __UpperCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase:Union[str, Any] = DecisionTransformerModel.from_pretrained(a__)
self.assertIsNotNone(a__)
def __UpperCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase:Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase:Union[str, Any] = model_class(a__)
_lowerCAmelCase:Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase:Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase:Union[str, Any] = [
'''states''',
'''actions''',
'''rewards''',
'''returns_to_go''',
'''timesteps''',
'''attention_mask''',
]
self.assertListEqual(arg_names[: len(a__)] ,a__)
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[Any]) -> Any:
"""simple docstring"""
_lowerCAmelCase:List[Any] = 2 # number of steps of autoregressive prediction we will perform
_lowerCAmelCase:int = 10 # defined by the RL environment, may be normalized
_lowerCAmelCase:Optional[int] = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''')
_lowerCAmelCase:Any = model.to(a__)
_lowerCAmelCase:Dict = model.config
torch.manual_seed(0)
_lowerCAmelCase:List[str] = torch.randn(1 ,1 ,config.state_dim).to(device=a__ ,dtype=torch.floataa) # env.reset()
_lowerCAmelCase:int = torch.tensor(
[[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] ,device=a__)
_lowerCAmelCase:str = torch.tensor(a__ ,device=a__ ,dtype=torch.floataa).reshape(1 ,1 ,1)
_lowerCAmelCase:Tuple = state
_lowerCAmelCase:Any = torch.zeros(1 ,0 ,config.act_dim ,device=a__ ,dtype=torch.floataa)
_lowerCAmelCase:Any = torch.zeros(1 ,0 ,device=a__ ,dtype=torch.floataa)
_lowerCAmelCase:Dict = torch.tensor(0 ,device=a__ ,dtype=torch.long).reshape(1 ,1)
for step in range(a__):
_lowerCAmelCase:Any = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=a__)] ,dim=1)
_lowerCAmelCase:Tuple = torch.cat([rewards, torch.zeros(1 ,1 ,device=a__)] ,dim=1)
_lowerCAmelCase:int = torch.ones(1 ,states.shape[1]).to(dtype=torch.long ,device=states.device)
with torch.no_grad():
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:List[str] = model(
states=a__ ,actions=a__ ,rewards=a__ ,returns_to_go=a__ ,timesteps=a__ ,attention_mask=a__ ,return_dict=a__ ,)
self.assertEqual(action_pred.shape ,actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1E-4))
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:str = ( # env.step(action)
torch.randn(1 ,1 ,config.state_dim).to(device=a__ ,dtype=torch.floataa),
1.0,
False,
{},
)
_lowerCAmelCase:int = action_pred[0, -1]
_lowerCAmelCase:Optional[int] = torch.cat([states, state] ,dim=1)
_lowerCAmelCase:List[str] = returns_to_go[0, -1] - reward
_lowerCAmelCase:str = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1)] ,dim=1)
_lowerCAmelCase:int = torch.cat(
[timesteps, torch.ones((1, 1) ,device=a__ ,dtype=torch.long) * (step + 1)] ,dim=1)
| 227 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
UpperCamelCase__ = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def UpperCAmelCase ( snake_case : str = "dhaka" , snake_case : int = 5 ):
_lowerCAmelCase:Tuple = min(snake_case , 50 ) # Prevent abuse!
_lowerCAmelCase:Dict = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
_lowerCAmelCase:List[Any] = requests.get('''https://www.google.com/search''' , params=snake_case , headers=snake_case )
_lowerCAmelCase:int = BeautifulSoup(html.text , '''html.parser''' )
_lowerCAmelCase:Tuple = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
_lowerCAmelCase:str = json.dumps(snake_case )
_lowerCAmelCase:Optional[Any] = json.loads(snake_case )
_lowerCAmelCase:int = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , snake_case , )
if not matched_google_image_data:
return 0
_lowerCAmelCase:Tuple = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(snake_case ) , )
_lowerCAmelCase:str = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , snake_case , )
for index, fixed_full_res_image in enumerate(snake_case ):
if index >= max_images:
return index
_lowerCAmelCase:List[str] = bytes(snake_case , '''ascii''' ).decode(
'''unicode-escape''' )
_lowerCAmelCase:str = bytes(snake_case , '''ascii''' ).decode(
'''unicode-escape''' )
_lowerCAmelCase:Dict = urllib.request.build_opener()
_lowerCAmelCase:int = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(snake_case )
_lowerCAmelCase:str = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(snake_case ):
os.makedirs(snake_case )
urllib.request.urlretrieve( # noqa: S310
snake_case , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
UpperCamelCase__ = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print('''Please provide a search term.''')
raise
| 227 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class lowercase_ ( yaml.SafeLoader ):
def _snake_case ( self , __A ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : str =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : Tuple =[tuple(__A ) if isinstance(__A , __A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Tuple =Counter(__A )
SCREAMING_SNAKE_CASE_ : Tuple =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def _snake_case ( self , __A , __A=False ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : str =super().construct_mapping(__A , deep=__A )
self._check_no_duplicates_on_constructed_node(__A )
return mapping
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str ) -> Tuple[Optional[str], str]:
SCREAMING_SNAKE_CASE_ : Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : Tuple =full_content[1:].index('''---''' ) + 1
SCREAMING_SNAKE_CASE_ : str ='''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(UpperCAmelCase_ )
class lowercase_ ( A ):
# class attributes
__lowerCamelCase = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def _snake_case ( cls , __A ) -> "DatasetMetadata":
with open(__A , encoding='''utf-8''' ) as readme_file:
SCREAMING_SNAKE_CASE_ : str =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__A )
else:
return cls()
def _snake_case ( self , __A ) -> List[str]:
if path.exists():
with open(__A , encoding='''utf-8''' ) as readme_file:
SCREAMING_SNAKE_CASE_ : int =readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =None
SCREAMING_SNAKE_CASE_ : str =self._to_readme(__A )
with open(__A , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(__A )
def _snake_case ( self , __A = None ) -> str:
if readme_content is not None:
SCREAMING_SNAKE_CASE_ : int =_split_yaml_from_readme(__A )
SCREAMING_SNAKE_CASE_ : Any ='''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] ='''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def _snake_case ( cls , __A ) -> "DatasetMetadata":
SCREAMING_SNAKE_CASE_ : Optional[int] =yaml.load(__A , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : List[Any] ={
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__A )
def _snake_case ( self ) -> str:
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__A , allow_unicode=__A , encoding='''utf-8''' , ).decode('''utf-8''' )
_lowercase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_lowercase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_lowercase = ap.parse_args()
_lowercase = Path(args.readme_filepath)
_lowercase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 708 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
_lowercase = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[Any]:
return (preds == labels).mean()
@dataclass
class lowercase_ :
__lowerCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__lowerCamelCase = field(
default=A , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__lowerCamelCase = field(
default=A , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__lowerCamelCase = field(
default=A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowercase_ :
__lowerCamelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
__lowerCamelCase = field(metadata={"help": "Should contain the data files for the task."} )
__lowerCamelCase = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__lowerCamelCase = field(
default=A , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ : Optional[Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCAmelCase_ )
# Set seed
set_seed(training_args.seed )
try:
SCREAMING_SNAKE_CASE_ : List[Any] =processors[data_args.task_name]()
SCREAMING_SNAKE_CASE_ : Optional[Any] =processor.get_labels()
SCREAMING_SNAKE_CASE_ : int =len(UpperCAmelCase_ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ : Optional[Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase_ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE_ : List[str] =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE_ : int =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
SCREAMING_SNAKE_CASE_ : List[Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase_ : EvalPrediction ) -> Dict:
SCREAMING_SNAKE_CASE_ : Tuple =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase_ , p.label_ids )}
# Data collator
SCREAMING_SNAKE_CASE_ : Dict =DataCollatorWithPadding(UpperCAmelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ : str =Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ : Dict ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE_ : Dict =trainer.evaluate()
SCREAMING_SNAKE_CASE_ : Any =os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(UpperCAmelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , UpperCAmelCase_ , UpperCAmelCase_ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCAmelCase_ )
return results
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 431 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __UpperCamelCase ( lowercase__ : Optional[int] ):
'''simple docstring'''
__lowercase =SwinConfig()
__lowercase =swin_name.split('_' )
__lowercase =name_split[1]
__lowercase =int(name_split[4] )
__lowercase =int(name_split[3][-1] )
if model_size == "tiny":
__lowercase =96
__lowercase =(2, 2, 6, 2)
__lowercase =(3, 6, 12, 24)
elif model_size == "small":
__lowercase =96
__lowercase =(2, 2, 18, 2)
__lowercase =(3, 6, 12, 24)
elif model_size == "base":
__lowercase =1_28
__lowercase =(2, 2, 18, 2)
__lowercase =(4, 8, 16, 32)
else:
__lowercase =1_92
__lowercase =(2, 2, 18, 2)
__lowercase =(6, 12, 24, 48)
if "in22k" in swin_name:
__lowercase =2_18_41
else:
__lowercase =10_00
__lowercase ="huggingface/label-files"
__lowercase ="imagenet-1k-id2label.json"
__lowercase =json.load(open(hf_hub_download(lowercase__, lowercase__, repo_type='dataset' ), 'r' ) )
__lowercase ={int(lowercase__ ): v for k, v in idalabel.items()}
__lowercase =idalabel
__lowercase ={v: k for k, v in idalabel.items()}
__lowercase =img_size
__lowercase =num_classes
__lowercase =embed_dim
__lowercase =depths
__lowercase =num_heads
__lowercase =window_size
return config
def __UpperCamelCase ( lowercase__ : Optional[Any] ):
'''simple docstring'''
if "patch_embed.proj" in name:
__lowercase =name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowercase =name.replace('patch_embed.norm', 'embeddings.norm' )
if "layers" in name:
__lowercase ="encoder." + name
if "attn.proj" in name:
__lowercase =name.replace('attn.proj', 'attention.output.dense' )
if "attn" in name:
__lowercase =name.replace('attn', 'attention.self' )
if "norm1" in name:
__lowercase =name.replace('norm1', 'layernorm_before' )
if "norm2" in name:
__lowercase =name.replace('norm2', 'layernorm_after' )
if "mlp.fc1" in name:
__lowercase =name.replace('mlp.fc1', 'intermediate.dense' )
if "mlp.fc2" in name:
__lowercase =name.replace('mlp.fc2', 'output.dense' )
if name == "norm.weight":
__lowercase ="layernorm.weight"
if name == "norm.bias":
__lowercase ="layernorm.bias"
if "head" in name:
__lowercase =name.replace('head', 'classifier' )
else:
__lowercase ="swin." + name
return name
def __UpperCamelCase ( lowercase__ : List[Any], lowercase__ : List[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowercase =orig_state_dict.pop(lowercase__ )
if "mask" in key:
continue
elif "qkv" in key:
__lowercase =key.split('.' )
__lowercase =int(key_split[1] )
__lowercase =int(key_split[3] )
__lowercase =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowercase =val[:dim, :]
__lowercase =val[
dim : dim * 2, :
]
__lowercase =val[-dim:, :]
else:
__lowercase =val[
:dim
]
__lowercase =val[
dim : dim * 2
]
__lowercase =val[
-dim:
]
else:
__lowercase =val
return orig_state_dict
def __UpperCamelCase ( lowercase__ : Tuple, lowercase__ : Any ):
'''simple docstring'''
__lowercase =timm.create_model(lowercase__, pretrained=lowercase__ )
timm_model.eval()
__lowercase =get_swin_config(lowercase__ )
__lowercase =SwinForImageClassification(lowercase__ )
model.eval()
__lowercase =convert_state_dict(timm_model.state_dict(), lowercase__ )
model.load_state_dict(lowercase__ )
__lowercase ="http://images.cocodataset.org/val2017/000000039769.jpg"
__lowercase =AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-' ) ) )
__lowercase =Image.open(requests.get(lowercase__, stream=lowercase__ ).raw )
__lowercase =image_processor(images=lowercase__, return_tensors='pt' )
__lowercase =timm_model(inputs['pixel_values'] )
__lowercase =model(**lowercase__ ).logits
assert torch.allclose(lowercase__, lowercase__, atol=1E-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCAmelCase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 119 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""PoolFormerFeatureExtractor"""]
__snake_case = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 178 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class lowercase__ ( lowercase__ ):
a_ ="""unispeech-sat"""
def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=(512, 512, 512, 512, 1500) , __UpperCAmelCase=(5, 3, 3, 1, 1) , __UpperCAmelCase=(1, 2, 3, 1, 1) , __UpperCAmelCase=512 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=504 , **__UpperCAmelCase , )-> Optional[Any]:
'''simple docstring'''
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = feat_extract_norm
lowerCAmelCase__ = feat_extract_activation
lowerCAmelCase__ = list(__lowercase )
lowerCAmelCase__ = list(__lowercase )
lowerCAmelCase__ = list(__lowercase )
lowerCAmelCase__ = conv_bias
lowerCAmelCase__ = num_conv_pos_embeddings
lowerCAmelCase__ = num_conv_pos_embedding_groups
lowerCAmelCase__ = len(self.conv_dim )
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = feat_proj_dropout
lowerCAmelCase__ = final_dropout
lowerCAmelCase__ = layerdrop
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = num_clusters
lowerCAmelCase__ = do_stable_layer_norm
lowerCAmelCase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase__ = apply_spec_augment
lowerCAmelCase__ = mask_time_prob
lowerCAmelCase__ = mask_time_length
lowerCAmelCase__ = mask_time_min_masks
lowerCAmelCase__ = mask_feature_prob
lowerCAmelCase__ = mask_feature_length
lowerCAmelCase__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase__ = num_codevectors_per_group
lowerCAmelCase__ = num_codevector_groups
lowerCAmelCase__ = contrastive_logits_temperature
lowerCAmelCase__ = feat_quantizer_dropout
lowerCAmelCase__ = num_negatives
lowerCAmelCase__ = codevector_dim
lowerCAmelCase__ = proj_codevector_dim
lowerCAmelCase__ = diversity_loss_weight
# ctc loss
lowerCAmelCase__ = ctc_loss_reduction
lowerCAmelCase__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase__ = list(__lowercase )
lowerCAmelCase__ = list(__lowercase )
lowerCAmelCase__ = list(__lowercase )
lowerCAmelCase__ = xvector_output_dim
@property
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 715 |
from collections import defaultdict
from math import ceil, sqrt
def _a ( UpperCamelCase_ : int = 1_000_000 , UpperCamelCase_ : int = 10 ) -> int:
"""simple docstring"""
lowerCAmelCase__ = defaultdict(UpperCamelCase_ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
lowerCAmelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
lowerCAmelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(UpperCamelCase_ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"{solution() = }")
| 115 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : List[Any] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase ="biogpt"
def __init__( self : Union[str, Any] , a__ : List[Any]=42384 , a__ : List[Any]=1024 , a__ : Optional[Any]=24 , a__ : Tuple=16 , a__ : str=4096 , a__ : int="gelu" , a__ : List[str]=0.1 , a__ : List[str]=0.1 , a__ : Any=1024 , a__ : Dict=0.02 , a__ : Any=1e-1_2 , a__ : Union[str, Any]=True , a__ : Tuple=True , a__ : Optional[Any]=0.0 , a__ : Tuple=0.0 , a__ : List[Any]=1 , a__ : Tuple=0 , a__ : List[str]=2 , **a__ : Any , ):
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = scale_embedding
UpperCAmelCase = use_cache
UpperCAmelCase = layerdrop
UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
| 51 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCamelCase__ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class __magic_name__ (unittest.TestCase ):
def __a ( self , _a , _a = None , _a = None , _a = None , _a = True , ) -> int:
lowerCAmelCase_ = [file for file in os.listdir(_a ) if os.path.isfile(os.path.join(_a , _a ) )]
if identifier is not None:
lowerCAmelCase_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_a , _a ):
for n_ in n_identifier:
lowerCAmelCase_ = [file for file in files if n_ not in file]
else:
lowerCAmelCase_ = [file for file in files if n_identifier not in file]
lowerCAmelCase_ = ignore_files or []
ignore_files.append("__init__.py" )
lowerCAmelCase_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , _a )
if only_modules:
lowerCAmelCase_ = file.split("." )[0]
try:
lowerCAmelCase_ = getattr(_a , _a )
lowerCAmelCase_ = doctest.DocTestSuite(_a )
lowerCAmelCase_ = unittest.TextTestRunner().run(_a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f"{module_identifier} is not a module." )
else:
lowerCAmelCase_ = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def __a ( self ) -> int:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = "modeling"
lowerCAmelCase_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(_a , identifier=_a , ignore_files=_a )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = "tokenization"
self.analyze_directory(_a , identifier=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = "configuration"
self.analyze_directory(_a , identifier=_a )
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = Path("src/transformers" )
lowerCAmelCase_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(_a , n_identifier=_a )
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = Path("docs/source" )
lowerCAmelCase_ = ["favicon.ico"]
self.analyze_directory(_a , ignore_files=_a , only_modules=_a )
| 122 | 0 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ("""dense.weight""", """attention.self.query""", """attention.self.key""", """attention.self.value""")
UpperCAmelCase__ : Any = (
("""layer.""", """layer_"""),
("""word_embeddings.weight""", """word_embeddings"""),
("""position_embeddings.weight""", """position_embeddings"""),
("""token_type_embeddings.weight""", """token_type_embeddings"""),
(""".""", """/"""),
("""LayerNorm/weight""", """LayerNorm/gamma"""),
("""LayerNorm/bias""", """LayerNorm/beta"""),
("""weight""", """kernel"""),
)
if not os.path.isdir(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
UpperCAmelCase__ : Optional[int] = model.state_dict()
def to_tf_var_name(__UpperCamelCase ):
for patt, repl in iter(__UpperCamelCase ):
UpperCAmelCase__ : int = name.replace(__UpperCamelCase , __UpperCamelCase )
return F"bert/{name}"
def create_tf_var(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase__ : str = tf.get_variable(dtype=__UpperCamelCase , shape=tensor.shape , name=__UpperCamelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__UpperCamelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase__ : int = to_tf_var_name(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase__ : int = torch_tensor.T
UpperCAmelCase__ : Any = create_tf_var(tensor=__UpperCamelCase , name=__UpperCamelCase , session=__UpperCamelCase )
tf.keras.backend.set_value(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Dict = session.run(__UpperCamelCase )
print(F"Successfully created {tf_name}: {np.allclose(__UpperCamelCase , __UpperCamelCase )}" )
UpperCAmelCase__ : Optional[int] = tf.train.Saver(tf.trainable_variables() )
saver.save(__UpperCamelCase , os.path.join(__UpperCamelCase , model_name.replace("""-""" , """_""" ) + """.ckpt""" ) )
def lowerCAmelCase ( __UpperCamelCase=None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""model name e.g. bert-base-uncased""" )
parser.add_argument(
"""--cache_dir""" , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase , help="""Directory containing pytorch model""" )
parser.add_argument("""--pytorch_model_path""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""/path/to/<pytorch-model-name>.bin""" )
parser.add_argument("""--tf_cache_dir""" , type=__UpperCamelCase , required=__UpperCamelCase , help="""Directory in which to save tensorflow model""" )
UpperCAmelCase__ : int = parser.parse_args(__UpperCamelCase )
UpperCAmelCase__ : Dict = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__UpperCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 194 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class __lowercase ( __lowerCamelCase ):
snake_case_ = """markuplm"""
def __init__( self : str ,A : List[Any]=30_522 ,A : Tuple=768 ,A : str=12 ,A : int=12 ,A : int=3_072 ,A : Optional[int]="gelu" ,A : Optional[int]=0.1 ,A : Optional[int]=0.1 ,A : Any=512 ,A : Any=2 ,A : str=0.0_2 ,A : int=1e-12 ,A : int=0 ,A : str=0 ,A : List[Any]=2 ,A : List[str]=256 ,A : Union[str, Any]=1_024 ,A : List[Any]=216 ,A : Any=1_001 ,A : Optional[int]=32 ,A : Any=50 ,A : int="absolute" ,A : Dict=True ,A : int=None ,**A : Optional[int] ,):
'''simple docstring'''
super().__init__(
pad_token_id=A ,bos_token_id=A ,eos_token_id=A ,**A ,)
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Union[str, Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Dict = hidden_dropout_prob
UpperCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : str = layer_norm_eps
UpperCAmelCase__ : Any = position_embedding_type
UpperCAmelCase__ : int = use_cache
UpperCAmelCase__ : List[str] = classifier_dropout
# additional properties
UpperCAmelCase__ : Optional[int] = max_depth
UpperCAmelCase__ : List[str] = max_xpath_tag_unit_embeddings
UpperCAmelCase__ : Any = max_xpath_subs_unit_embeddings
UpperCAmelCase__ : str = tag_pad_id
UpperCAmelCase__ : Dict = subs_pad_id
UpperCAmelCase__ : List[str] = xpath_unit_hidden_size
| 194 | 1 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
_UpperCAmelCase = 0
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_UpperCAmelCase = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_UpperCAmelCase = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
_UpperCAmelCase = 0
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_UpperCAmelCase = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_UpperCAmelCase = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__a : int = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Dict = ["""ViTFeatureExtractor"""]
__a : List[str] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[Any] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[Any] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[int] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 | from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Tuple = ['''image_processor''', '''tokenizer''']
__a : Dict = '''AutoImageProcessor'''
__a : List[Any] = '''AutoTokenizer'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = self.image_processor
def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if images is not None:
__lowercase = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"] | 522 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def a__ ( ):
'''simple docstring'''
__magic_name__ = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
__magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ).convert("""RGB""" )
return image
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = dct.pop(A_ )
__magic_name__ = val
def a__ ( A_, A_ ):
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__magic_name__ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__magic_name__ = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__magic_name__ = torch.cat((q_bias, torch.zeros_like(A_, requires_grad=A_ ), v_bias) )
__magic_name__ = qkv_bias
def a__ ( A_, A_ ):
'''simple docstring'''
__magic_name__ = 364 if """coco""" in model_name else 224
__magic_name__ = BlipaVisionConfig(image_size=A_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__magic_name__ = OPTConfig.from_pretrained("""facebook/opt-2.7b""", eos_token_id=A_ ).to_dict()
elif "opt-6.7b" in model_name:
__magic_name__ = OPTConfig.from_pretrained("""facebook/opt-6.7b""", eos_token_id=A_ ).to_dict()
elif "t5-xl" in model_name:
__magic_name__ = TaConfig.from_pretrained("""google/flan-t5-xl""", dense_act_fn="""gelu""", bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__magic_name__ = TaConfig.from_pretrained("""google/flan-t5-xxl""", dense_act_fn="""gelu""", bos_token_id=1 ).to_dict()
__magic_name__ = BlipaConfig(vision_config=A_, text_config=A_ )
return config, image_size
@torch.no_grad()
def a__ ( A_, A_=None, A_=False ):
'''simple docstring'''
__magic_name__ = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
__magic_name__ = tokenizer("""\n""", add_special_tokens=A_ ).input_ids[0]
__magic_name__ , __magic_name__ = get_blipa_config(A_, eos_token_id=A_ )
__magic_name__ = BlipaForConditionalGeneration(A_ ).eval()
__magic_name__ = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
__magic_name__ , __magic_name__ = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__magic_name__ = """cuda""" if torch.cuda.is_available() else """cpu"""
__magic_name__ , __magic_name__ , __magic_name__ = load_model_and_preprocess(
name=A_, model_type=A_, is_eval=A_, device=A_ )
original_model.eval()
print("""Done!""" )
# update state dict keys
__magic_name__ = original_model.state_dict()
__magic_name__ = create_rename_keys(A_ )
for src, dest in rename_keys:
rename_key(A_, A_, A_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__magic_name__ = state_dict.pop(A_ )
if key.startswith("""Qformer.bert""" ):
__magic_name__ = key.replace("""Qformer.bert""", """qformer""" )
if "attention.self" in key:
__magic_name__ = key.replace("""self""", """attention""" )
if "opt_proj" in key:
__magic_name__ = key.replace("""opt_proj""", """language_projection""" )
if "t5_proj" in key:
__magic_name__ = key.replace("""t5_proj""", """language_projection""" )
if key.startswith("""opt""" ):
__magic_name__ = key.replace("""opt""", """language""" )
if key.startswith("""t5""" ):
__magic_name__ = key.replace("""t5""", """language""" )
__magic_name__ = val
# read in qv biases
read_in_q_v_bias(A_, A_ )
__magic_name__ , __magic_name__ = hf_model.load_state_dict(A_, strict=A_ )
assert len(A_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__magic_name__ = load_demo_image()
__magic_name__ = vis_processors["""eval"""](A_ ).unsqueeze(0 ).to(A_ )
__magic_name__ = tokenizer(["""\n"""], return_tensors="""pt""" ).input_ids.to(A_ )
# create processor
__magic_name__ = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size}, image_mean=A_, image_std=A_ )
__magic_name__ = BlipaProcessor(image_processor=A_, tokenizer=A_ )
__magic_name__ = processor(images=A_, return_tensors="""pt""" ).pixel_values.to(A_ )
# make sure processor creates exact same pixel values
assert torch.allclose(A_, A_ )
original_model.to(A_ )
hf_model.to(A_ )
with torch.no_grad():
if "opt" in model_name:
__magic_name__ = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
__magic_name__ = hf_model(A_, A_ ).logits
else:
__magic_name__ = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
__magic_name__ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 )
__magic_name__ = hf_model(A_, A_, labels=A_ ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""", original_logits[0, :3, :3] )
print("""First values of HF logits:""", logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__magic_name__ = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=A_ )
assert torch.allclose(logits[0, :3, :3], A_, atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__magic_name__ = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=A_ )
else:
# cast to same type
__magic_name__ = logits.dtype
assert torch.allclose(original_logits.to(A_ ), A_, atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
__magic_name__ = """"""
__magic_name__ = tokenizer(A_, return_tensors="""pt""" ).input_ids.to(A_ )
__magic_name__ = original_model.generate({"""image""": original_pixel_values} )
__magic_name__ = hf_model.generate(
A_, A_, do_sample=A_, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
print("""Original generation:""", A_ )
__magic_name__ = input_ids.shape[1]
__magic_name__ = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=A_ )
__magic_name__ = [text.strip() for text in output_text]
print("""HF generation:""", A_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(A_ )
hf_model.save_pretrained(A_ )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
__lowerCAmelCase : Tuple = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : Tuple = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 529 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( A_, A_, A_ ):
'''simple docstring'''
if gpta_config_file == "":
__magic_name__ = GPTaConfig()
else:
__magic_name__ = GPTaConfig.from_json_file(A_ )
__magic_name__ = GPTaModel(A_ )
# Load weights from numpy
load_tf_weights_in_gpta(A_, A_, A_ )
# Save pytorch-model
__magic_name__ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__magic_name__ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(model.state_dict(), A_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(A_, """w""", encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__lowerCAmelCase : Tuple = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 529 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __snake_case :
def __init__( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Any=10 , _UpperCAmelCase : str=3 , _UpperCAmelCase : List[str]=32 * 4 , _UpperCAmelCase : List[Any]=32 * 6 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Any=32 , ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : List[Any] = batch_size
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : int = use_auxiliary_loss
_lowerCAmelCase : Optional[Any] = num_queries
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : int = min_size
_lowerCAmelCase : Dict = max_size
_lowerCAmelCase : Optional[Any] = num_labels
_lowerCAmelCase : Any = mask_feature_size
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
_lowerCAmelCase : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
_lowerCAmelCase : Any = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
_lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = self.prepare_config_and_inputs()
_lowerCAmelCase : List[Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Tuple = output.encoder_hidden_states
_lowerCAmelCase : int = output.pixel_decoder_hidden_states
_lowerCAmelCase : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_config.decoder_layers )
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = MaskFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_lowerCAmelCase : Optional[int] = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
_lowerCAmelCase : Any = MaskFormerForInstanceSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_lowerCAmelCase : List[str] = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __snake_case (_a , _a , unittest.TestCase ):
lowerCAmelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase__ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = MaskFormerModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[str] = [*signature.parameters.keys()]
_lowerCAmelCase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCAmelCase : List[Any] = MaskFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
'''simple docstring'''
_lowerCAmelCase : str = (self.model_tester.min_size,) * 2
_lowerCAmelCase : Union[str, Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
_lowerCAmelCase : Any = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCAmelCase )
_lowerCAmelCase : Tuple = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCAmelCase : Optional[Any] = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_lowerCAmelCase : Any = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Any = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : int = True
_lowerCAmelCase : int = True
_lowerCAmelCase : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_lowerCAmelCase : Tuple = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
_lowerCAmelCase : Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCAmelCase : List[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_lowerCAmelCase : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCAmelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCamelCase : Optional[int] = 1e-4
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __snake_case (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(_UpperCAmelCase )
_lowerCAmelCase : List[str] = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : Tuple = image_processor(_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
_lowerCAmelCase : List[str] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**_UpperCAmelCase )
_lowerCAmelCase : List[str] = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
_lowerCAmelCase : str = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_UpperCAmelCase )
.eval()
)
_lowerCAmelCase : Optional[Any] = self.default_image_processor
_lowerCAmelCase : Tuple = prepare_img()
_lowerCAmelCase : str = image_processor(_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
_lowerCAmelCase : Dict = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**_UpperCAmelCase )
# masks_queries_logits
_lowerCAmelCase : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCAmelCase : List[str] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
_lowerCAmelCase : Optional[int] = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
_lowerCAmelCase : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCAmelCase : str = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(_UpperCAmelCase )
.eval()
)
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : int = image_processor(_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**_UpperCAmelCase )
# masks_queries_logits
_lowerCAmelCase : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_lowerCAmelCase : List[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_lowerCAmelCase : Any = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
_lowerCAmelCase : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_lowerCAmelCase : Any = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(_UpperCAmelCase )
.eval()
)
_lowerCAmelCase : Optional[Any] = self.default_image_processor
_lowerCAmelCase : List[Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
_lowerCAmelCase : Any = inputs["""pixel_values"""].to(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = [el.to(_UpperCAmelCase ) for el in inputs["""mask_labels"""]]
_lowerCAmelCase : Optional[int] = [el.to(_UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
_lowerCAmelCase : Tuple = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 196 |
def _UpperCAmelCase (UpperCamelCase_ : str ):
'''simple docstring'''
_lowerCAmelCase : List[str] = [int(UpperCamelCase_ ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(UpperCamelCase_ ) == 4 and all(0 <= int(UpperCamelCase_ ) <= 254 for octet in octets )
if __name__ == "__main__":
_lowerCamelCase : List[str] = input().strip()
_lowerCamelCase : int = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 196 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Any=2 , _lowercase : str=True , _lowercase : List[Any]=False , _lowercase : Optional[Any]=10 , _lowercase : Tuple=3 , _lowercase : int=32 * 8 , _lowercase : str=32 * 8 , _lowercase : Optional[Any]=4 , _lowercase : Optional[int]=64 , ):
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : Any = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_size
SCREAMING_SNAKE_CASE__ : Dict = max_size
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dim
SCREAMING_SNAKE_CASE__ : str = hidden_dim
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowercase )
SCREAMING_SNAKE_CASE__ : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowercase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_lowercase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE__ : List[str] = self.num_queries
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE__ : int = self.num_channels
SCREAMING_SNAKE_CASE__ : List[str] = 64
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_28
SCREAMING_SNAKE_CASE__ : int = self.hidden_dim
SCREAMING_SNAKE_CASE__ : int = self.hidden_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = self.hidden_dim
return config
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] , _lowercase : int , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ : Any = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , config.decoder_layers )
def lowercase__ ( self : str , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaskaFormerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(pixel_values=_lowercase , pixel_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_lowercase , output_hidden_states=_lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowercase , _lowercase )
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Any = MaskaFormerForUniversalSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
def comm_check_on_output(_lowercase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(pixel_values=_lowercase , pixel_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
comm_check_on_output(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = model(
pixel_values=_lowercase , pixel_mask=_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
comm_check_on_output(_lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Any = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase : str = False
lowerCamelCase : Dict = False
lowerCamelCase : Dict = False
lowerCamelCase : Any = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowercase__ ( self : str ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowercase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase__ ( self : int ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase__ ( self : List[Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase__ ( self : Optional[int] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase__ ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@slow
def lowercase__ ( self : Dict ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskaFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[str] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowercase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowercase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowercase ).long(),
}
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.get_config()
SCREAMING_SNAKE_CASE__ : List[str] = MaskaFormerForUniversalSegmentation(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase , output_attentions=_lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : Optional[int] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : str = model_class(_lowercase )
model.to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase ).to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a_ :Optional[int] = 1e-4
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Tuple ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase__ ( self : Any ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE__ : int = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE__ : str = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Tuple = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Tuple = inputs['''pixel_values'''].to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = [el.to(_lowercase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ : List[Any] = [el.to(_lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
| 35 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class a ( unittest.TestCase ):
def __init__( self :List[str] ,__lowercase :Any ,__lowercase :Optional[Any]=1_3 ,__lowercase :Optional[Any]=7 ,__lowercase :Union[str, Any]=True ,__lowercase :Optional[int]=True ,__lowercase :Dict=True ,__lowercase :int=True ,__lowercase :List[str]=9_9 ,__lowercase :Optional[Any]=3_2 ,__lowercase :Dict=5 ,__lowercase :List[str]=4 ,__lowercase :Dict=3_7 ,__lowercase :Dict="gelu" ,__lowercase :Any=0.1 ,__lowercase :Any=0.1 ,__lowercase :int=5_1_2 ,__lowercase :List[str]=1_6 ,__lowercase :List[Any]=2 ,__lowercase :List[str]=0.02 ,__lowercase :Optional[int]=4 ,):
snake_case__ : Union[str, Any] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : Optional[int] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : Optional[Any] = use_attention_mask
snake_case__ : Tuple = use_token_type_ids
snake_case__ : str = use_labels
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : Union[str, Any] = type_sequence_label_size
snake_case__ : Tuple = initializer_range
snake_case__ : Tuple = num_choices
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : Optional[Any] = None
if self.use_attention_mask:
snake_case__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : str = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ : Any = AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def __lowerCamelCase ( self :Any ):
snake_case__ : Any = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Any = config_and_inputs
snake_case__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : List[str] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Optional[int] = FlaxAlbertModelTester(self )
@slow
def __lowerCamelCase ( self :List[str] ):
for model_class_name in self.all_model_classes:
snake_case__ : Any = model_class_name.from_pretrained('''albert-base-v2''' )
snake_case__ : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowercase )
@require_flax
class a ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self :str ):
snake_case__ : str = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
snake_case__ : int = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case__ : List[str] = model(__lowercase ,attention_mask=__lowercase )[0]
snake_case__ : Optional[Any] = (1, 1_1, 7_6_8)
self.assertEqual(output.shape ,__lowercase )
snake_case__ : List[str] = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,__lowercase ,atol=1e-4 ) )
| 252 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 201 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE_ = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['BridgeTowerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 201 | 1 |
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase = [[0 for _ in range(SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE__ = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
SCREAMING_SNAKE_CASE__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
| 532 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
SCREAMING_SNAKE_CASE__ = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class lowercase ( unittest.TestCase ):
def _snake_case ( self , lowercase , lowercase , lowercase = None , lowercase = None ) -> Dict:
lowerCAmelCase = None
lowerCAmelCase = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
lowerCAmelCase = os.path.abspath("""examples""" )
for item in os.listdir(lowercase ):
if item not in EXCLUDE_EXAMPLES:
lowerCAmelCase = os.path.join(lowercase , lowercase )
if os.path.isfile(lowercase ) and ".py" in item_path:
with self.subTest(
tested_script=lowercase , feature_script=lowercase , tested_section="""main()""" if parser_only else """training_function()""" , ):
lowerCAmelCase = compare_against_test(
os.path.join(lowercase , lowercase ) , lowercase , lowercase , lowercase )
lowerCAmelCase = """\n""".join(lowercase )
if special_strings is not None:
for string in special_strings:
lowerCAmelCase = diff.replace(lowercase , """""" )
self.assertEqual(lowercase , """""" )
def _snake_case ( self ) -> List[Any]:
self.one_complete_example("""complete_nlp_example.py""" , lowercase )
self.one_complete_example("""complete_nlp_example.py""" , lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
lowerCAmelCase = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , lowercase , lowercase , lowercase )
self.one_complete_example("""complete_cv_example.py""" , lowercase , lowercase , lowercase )
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = False
@classmethod
def _snake_case ( cls ) -> Optional[int]:
super().setUpClass()
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def _snake_case ( cls ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _snake_case ( self ) -> str:
lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
lowerCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowercase )
self.assertNotIn("""epoch 0:""" , lowercase )
self.assertIn("""epoch 1:""" , lowercase )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowercase )
if torch.cuda.is_available():
lowerCAmelCase = torch.cuda.device_count()
else:
lowerCAmelCase = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , lowercase )
self.assertIn("""epoch 1:""" , lowercase )
else:
self.assertIn("""epoch 0:""" , lowercase )
self.assertIn("""epoch 1:""" , lowercase )
@slow
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
lowerCAmelCase = run_command(self._launch_args + testargs , return_stdout=lowercase )
lowerCAmelCase = re.findall("""({.+})""" , lowercase )
lowerCAmelCase = [r for r in results if """accuracy""" in r][-1]
lowerCAmelCase = ast.literal_eval(lowercase )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def _snake_case ( self ) -> int:
lowerCAmelCase = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdir:
lowerCAmelCase = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(lowercase , """tracking""" ) ) )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def _snake_case ( self ) -> int:
lowerCAmelCase = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 532 | 1 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCamelCase = '<<<<<<< This should probably be modified because it mentions: '
__lowerCamelCase = '=======\n>>>>>>>\n'
__lowerCamelCase = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
__lowerCamelCase = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def a ( __UpperCAmelCase : int ) -> List[str]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __A ( __lowerCamelCase ):
@staticmethod
def lowerCamelCase__ ( __snake_case : Tuple ) -> List[Any]:
__magic_name__: str = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=a_ , required=a_ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=a_ , required=a_ , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=a_ )
def __init__( self : Dict , __snake_case : Any , __snake_case : Tuple , *__snake_case : Any ) -> Optional[Any]:
__magic_name__: List[str] = get_logger("""datasets-cli/converting""" )
__magic_name__: Any = tfds_path
__magic_name__: List[str] = datasets_directory
def lowerCamelCase__ ( self : Any ) -> Tuple:
if os.path.isdir(self._tfds_path ):
__magic_name__: Optional[int] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
__magic_name__: Tuple = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
__magic_name__: Any = os.path.abspath(self._datasets_directory )
self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
__magic_name__: str = []
__magic_name__: Dict = []
__magic_name__: Any = {}
if os.path.isdir(self._tfds_path ):
__magic_name__: Optional[int] = os.listdir(a_ )
else:
__magic_name__: Any = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'Looking at file {f_name}' )
__magic_name__: Union[str, Any] = os.path.join(a_ , a_ )
__magic_name__: List[str] = os.path.join(a_ , a_ )
if not os.path.isfile(a_ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(a_ , encoding="""utf-8""" ) as f:
__magic_name__: Any = f.readlines()
__magic_name__: Optional[int] = []
__magic_name__: List[str] = False
__magic_name__: Union[str, Any] = False
__magic_name__: Optional[Any] = []
for line in lines:
__magic_name__: Tuple = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__magic_name__: str = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
__magic_name__: Any = ""
continue
elif "from absl import logging" in out_line:
__magic_name__: Tuple = "from datasets import logging\n"
elif "getLogger" in out_line:
__magic_name__: Tuple = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
__magic_name__: List[str] = True
__magic_name__: Tuple = list(filter(lambda __snake_case : e in out_line , a_ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a_ ) + """\n""" )
out_lines.append(a_ )
out_lines.append(a_ )
continue
else:
for pattern, replacement in TO_CONVERT:
__magic_name__: Tuple = re.sub(a_ , a_ , a_ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__magic_name__: int = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , a_ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
__magic_name__: List[Any] = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__magic_name__: Optional[int] = True
out_lines.append(a_ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__magic_name__: str = f_name.replace(""".py""" , """""" )
__magic_name__: str = os.path.join(a_ , a_ )
__magic_name__: Any = os.path.join(a_ , a_ )
os.makedirs(a_ , exist_ok=a_ )
self._logger.info(F'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(a_ )
if needs_manual_update:
with_manual_update.append(a_ )
with open(a_ , """w""" , encoding="""utf-8""" ) as f:
f.writelines(a_ )
self._logger.info(F'Converted in {output_file}' )
for utils_file in utils_files:
try:
__magic_name__: Any = os.path.basename(a_ )
__magic_name__: int = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'Moving {dest_folder} to {utils_file}' )
shutil.copy(a_ , a_ )
except KeyError:
self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 707 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : str , __snake_case : int = 1_0_1 ) -> Tuple:
__magic_name__: Optional[Any] = length
def __len__( self : int ) -> Tuple:
return self.length
def __getitem__( self : Dict , __snake_case : str ) -> int:
return i
class __A :
def __call__( self : int , __snake_case : List[str] ) -> List[Any]:
return {"input_ids": torch.tensor(__snake_case ), "labels": torch.tensor(__snake_case )}
class __A ( nn.Module ):
def __init__( self : Any ) -> Dict:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
__magic_name__: List[str] = nn.Linear(1_2_0 , 8_0 )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : List[str]=None ) -> str:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_torch_neuroncore
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
__magic_name__: List[Any] = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__magic_name__: Tuple = self.get_auto_remove_tmp_dir()
__magic_name__: Any = F'--output_dir {output_dir}'.split()
__magic_name__: Any = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(__snake_case , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __A ( SCREAMING_SNAKE_CASE_ ):
@require_torch_multi_gpu
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__: List[Any] = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
__magic_name__: Optional[Any] = self.get_auto_remove_tmp_dir()
__magic_name__: Union[str, Any] = F'--output_dir {output_dir}'.split()
__magic_name__: Union[str, Any] = ["""torchrun"""] + distributed_args + args
execute_subprocess_async(__snake_case , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__lowerCamelCase = HfArgumentParser((TrainingArguments,))
__lowerCamelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
f'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
__lowerCamelCase = DummyDataset(dataset_length)
def a ( __UpperCAmelCase : EvalPrediction ) -> Dict:
__magic_name__: Any = list(range(len(__UpperCAmelCase ) ) )
__magic_name__: List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
__lowerCamelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__lowerCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCamelCase = 2
__lowerCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__lowerCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__lowerCamelCase = None
| 213 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class _UpperCAmelCase :
__SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
__SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
__SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
__SCREAMING_SNAKE_CASE : torch.Tensor # [batch_size x 3]
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : int
__SCREAMING_SNAKE_CASE : float
__SCREAMING_SNAKE_CASE : float
__SCREAMING_SNAKE_CASE : Tuple[int]
def a_ ( self ) -> int:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def a_ ( self ) -> str:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a_ ( self ) -> List[Any]:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a_ ( self ) -> torch.Tensor:
UpperCAmelCase = torch.arange(self.height * self.width )
UpperCAmelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(lowercase_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def a_ ( self ) -> int:
UpperCAmelCase , *UpperCAmelCase = self.shape
UpperCAmelCase = int(np.prod(lowercase_ ) )
UpperCAmelCase = self.get_image_coords()
UpperCAmelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase = self.get_camera_rays(lowercase_ )
UpperCAmelCase = rays.view(lowercase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a_ ( self , lowercase_ ) -> torch.Tensor:
UpperCAmelCase , *UpperCAmelCase , UpperCAmelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase = coords.view(lowercase_ , -1 , 2 )
UpperCAmelCase = self.resolution()
UpperCAmelCase = self.fov()
UpperCAmelCase = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase = fracs * torch.tan(fov / 2 )
UpperCAmelCase = fracs.view(lowercase_ , -1 , 2 )
UpperCAmelCase = (
self.z.view(lowercase_ , 1 , 3 )
+ self.x.view(lowercase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowercase_ , 1 , 3 ) * fracs[:, :, 1:]
)
UpperCAmelCase = directions / directions.norm(dim=-1 , keepdim=lowercase_ )
UpperCAmelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(lowercase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowercase_ , *lowercase_ , 2 , 3 )
def a_ ( self , lowercase_ , lowercase_ ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowercase_ , height=lowercase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def lowercase__ ( lowerCAmelCase : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
UpperCAmelCase = np.array([np.sin(lowerCAmelCase ), np.cos(lowerCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase = -z * 4
UpperCAmelCase = np.array([np.cos(lowerCAmelCase ), -np.sin(lowerCAmelCase ), 0.0] )
UpperCAmelCase = np.cross(lowerCAmelCase , lowerCAmelCase )
origins.append(lowerCAmelCase )
xs.append(lowerCAmelCase )
ys.append(lowerCAmelCase )
zs.append(lowerCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , width=lowerCAmelCase , height=lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase )) , )
| 373 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
SCREAMING_SNAKE_CASE_ = 6_3_7_8_1_3_7.0
SCREAMING_SNAKE_CASE_ = 6_3_5_6_7_5_2.3_1_4_2_4_5
SCREAMING_SNAKE_CASE_ = 637_8137
def lowercase__ ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ) -> float:
"""simple docstring"""
UpperCAmelCase = (AXIS_A - AXIS_B) / AXIS_A
UpperCAmelCase = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) )
UpperCAmelCase = atan((1 - flattening) * tan(radians(lowerCAmelCase ) ) )
UpperCAmelCase = radians(lowerCAmelCase )
UpperCAmelCase = radians(lowerCAmelCase )
# Equation
UpperCAmelCase = sin((phi_a - phi_a) / 2 )
UpperCAmelCase = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
UpperCAmelCase = sqrt(sin_sq_phi + (cos(lowerCAmelCase ) * cos(lowerCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 373 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase =logging.get_logger(__name__)
def __a ( A , A , A ) -> Dict:
'''simple docstring'''
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def __a ( A , A , A ) -> Dict:
'''simple docstring'''
A__ = to_pil_image(A )
A__ , A__ = pil_image.size
A__ = pytesseract.image_to_data(A , lang=A , output_type="dict" , config=A )
A__ , A__ , A__ , A__ , A__ = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
A__ = [idx for idx, word in enumerate(A ) if not word.strip()]
A__ = [word for idx, word in enumerate(A ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
A__ = []
for x, y, w, h in zip(A , A , A , A ):
A__ = [x, y, x + w, y + h]
actual_boxes.append(A )
# finally, normalize the bounding boxes
A__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(A , A , A ) )
assert len(A ) == len(A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase__ ( UpperCAmelCase_ ):
lowercase__ : str = ["""pixel_values"""]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 2_55 , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = "" , **UpperCamelCase__ , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
A__ = size if size is not None else {"height": 2_24, "width": 2_24}
A__ = get_size_dict(UpperCamelCase__ )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_rescale
A__ = rescale_value
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
A__ = apply_ocr
A__ = ocr_lang
A__ = tesseract_config
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = None , **UpperCamelCase__ , ):
'''simple docstring'''
A__ = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
A__ = (size["height"], size["width"])
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ):
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCamelCase__ )
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = apply_ocr if apply_ocr is not None else self.apply_ocr
A__ = ocr_lang if ocr_lang is not None else self.ocr_lang
A__ = tesseract_config if tesseract_config is not None else self.tesseract_config
A__ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCamelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
A__ = []
A__ = []
for image in images:
A__ , A__ = apply_tesseract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
words_batch.append(UpperCamelCase__ )
boxes_batch.append(UpperCamelCase__ )
if do_resize:
A__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
A__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
A__ = BatchFeature(data={"pixel_values": images} , tensor_type=UpperCamelCase__ )
if apply_ocr:
A__ = words_batch
A__ = boxes_batch
return data | 261 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
lowercase__ : Tuple = """blenderbot-small"""
lowercase__ : List[Any] = ["""past_key_values"""]
lowercase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , UpperCamelCase__=5_02_65 , UpperCamelCase__=5_12 , UpperCamelCase__=8 , UpperCamelCase__=20_48 , UpperCamelCase__=16 , UpperCamelCase__=8 , UpperCamelCase__=20_48 , UpperCamelCase__=16 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="gelu" , UpperCamelCase__=5_12 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=2 , **UpperCamelCase__ , ):
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = use_cache
A__ = encoder_layers
A__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A__ = {0: "batch"}
A__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A__ = {0: "batch", 1: "decoder_sequence"}
A__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A__ , A__ = self.num_layers
for i in range(UpperCamelCase__ ):
A__ = {0: "batch", 2: "past_sequence + sequence"}
A__ = {0: "batch", 2: "past_sequence + sequence"}
else:
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super().outputs
else:
A__ = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A__ , A__ = self.num_layers
for i in range(UpperCamelCase__ ):
A__ = {0: "batch", 2: "past_sequence + sequence"}
A__ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A__ = seq_length if not self.use_past else 1
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
A__ = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ = common_inputs["input_ids"].shape
A__ = common_inputs["decoder_input_ids"].shape[1]
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A__ = decoder_seq_length + 3
A__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A__ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A__ , A__ = self.num_layers
A__ = min(UpperCamelCase__ , UpperCamelCase__ )
A__ = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A__ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A__ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A__ = seqlen + 2
A__ , A__ = self.num_layers
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A__ = common_inputs["attention_mask"].dtype
A__ = torch.cat(
[common_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A__ = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A__ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A__ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A__ = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A__ = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A__ = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) | 261 | 1 |
import random
from typing import Any
def a__ ( A_ ):
'''simple docstring'''
for _ in range(len(__lowercase ) ):
__magic_name__ = random.randint(0, len(__lowercase ) - 1 )
__magic_name__ = random.randint(0, len(__lowercase ) - 1 )
__magic_name__ , __magic_name__ = data[b], data[a]
return data
if __name__ == "__main__":
__lowerCAmelCase : int = [0, 1, 2, 3, 4, 5, 6, 7]
__lowerCAmelCase : Dict = ['''python''', '''says''', '''hello''', '''!''']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 529 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCAmelCase_ ( __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = AlbertConfig.from_json_file(__lowercase )
print(f'Building PyTorch model from configuration: {config}' )
_UpperCAmelCase = AlbertForPreTraining(__lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 236 | 0 |
def _lowerCAmelCase ( A__: int = 100_0000 ):
'''simple docstring'''
UpperCAmelCase = limit + 1
UpperCAmelCase = [0] * limit
for first_term in range(1 , A__ ):
for n in range(A__ , A__ , A__ ):
UpperCAmelCase = first_term + n / first_term
if common_difference % 4: # d must be divisble by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
frequency[n] += 1 # so z>0 and a>d ,also 4d<a
UpperCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 391 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__magic_name__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
__magic_name__ = []
__magic_name__ = []
__magic_name__ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
__magic_name__ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"emoji": True,
},
}
]
__magic_name__ = 0
for log in Path().glob("*.log"):
__magic_name__ = 0
with open(log, "r") as f:
for line in f:
__magic_name__ = json.loads(line)
if line.get("nodeid", "") != "":
__magic_name__ = line["nodeid"]
if line.get("duration", None) is not None:
__magic_name__ = f'''{line["duration"]:.4f}'''
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__magic_name__ = []
log.unlink()
__magic_name__ = ""
__magic_name__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__magic_name__ = []
__magic_name__ = {}
for test in failed_tests:
__magic_name__ = test[0].split("::")
__magic_name__ = data[0].split("/")[-1]
if data[0] not in filesafailed:
__magic_name__ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__magic_name__ = [test[0] for test in failed_table]
__magic_name__ = list(set(files))
# Count number of instances in failed_tests
__magic_name__ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__magic_name__ = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__magic_name__ = "Too many failed tests, please see the full report in the Action results."
__magic_name__ = len(err) + 10
__magic_name__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
__magic_name__ = "No failed tests! 🤗"
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
__magic_name__ = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
__magic_name__ = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
__magic_name__ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
__magic_name__ = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__magic_name__ = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__magic_name__ = row[0]
else:
__magic_name__ = ""
__magic_name__ = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 391 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase_ : str = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase_ : int = SegmentTree(test_array, min)
lowercase_ : Optional[int] = SegmentTree(test_array, max)
lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def A__ ( ):
for i in range(len(snake_case_ ) ):
for j in range(snake_case_ , len(snake_case_ ) ):
SCREAMING_SNAKE_CASE__: Any= reduce(snake_case_ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE__: Optional[Any]= reduce(snake_case_ , test_array[i : j + 1] )
SCREAMING_SNAKE_CASE__: int= reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case_ , snake_case_ )
assert max_range == max_segment_tree.query(snake_case_ , snake_case_ )
assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ )
test_all_segments()
for index, value in test_updates.items():
lowercase_ : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 64 | 1 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase__ : List[str] = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
lowercase__ : str = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = CodeGenTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
if kwargs.pop('add_bos_token' , _UpperCAmelCase):
__A : List[Any] = kwargs.pop('name_or_path' , '')
raise ValueError(
'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.'
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
F'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.')
__A : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase) != add_prefix_space:
__A : List[str] = getattr(_UpperCAmelCase , pre_tok_state.pop('type'))
__A : Optional[Any] = add_prefix_space
__A : Union[str, Any] = pre_tok_class(**_UpperCAmelCase)
__A : Optional[int] = add_prefix_space
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = kwargs.get('is_split_into_words' , _UpperCAmelCase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
__A : str = kwargs.get('is_split_into_words' , _UpperCAmelCase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : List[str] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase)
return tuple(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = super().decode(
token_ids=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , **_UpperCAmelCase , )
if truncate_before_pattern is not None and len(_UpperCAmelCase) > 0:
__A : Union[str, Any] = self.truncate(_UpperCAmelCase , _UpperCAmelCase)
return decoded_text
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
def find_re(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Any = pattern.search(_UpperCAmelCase , _UpperCAmelCase)
return m.start() if m else -1
__A : Optional[int] = [re.compile(_UpperCAmelCase , re.MULTILINE) for pattern in truncate_before_pattern]
__A : List[str] = list(re.finditer('^print' , _UpperCAmelCase , re.MULTILINE))
if len(_UpperCAmelCase) > 1:
__A : Optional[int] = completion[: prints[1].start()]
__A : List[Any] = list(re.finditer('^def' , _UpperCAmelCase , re.MULTILINE))
if len(_UpperCAmelCase) > 1:
__A : Union[str, Any] = completion[: defs[1].start()]
__A : Tuple = 0
__A : Any = [
pos for pos in [find_re(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) for terminal in terminals] if pos != -1
]
if len(_UpperCAmelCase) > 0:
return completion[: min(_UpperCAmelCase)]
else:
return completion | 338 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
) | 338 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : str = "audio-spectrogram-transformer"
def __init__( self : Any , lowercase : int=768 , lowercase : str=12 , lowercase : Optional[Any]=12 , lowercase : Optional[int]=3_072 , lowercase : List[Any]="gelu" , lowercase : int=0.0 , lowercase : str=0.0 , lowercase : Optional[Any]=0.02 , lowercase : Dict=1E-12 , lowercase : List[Any]=16 , lowercase : Dict=True , lowercase : int=10 , lowercase : Optional[Any]=10 , lowercase : Union[str, Any]=1_024 , lowercase : Optional[Any]=128 , **lowercase : Dict , ):
'''simple docstring'''
super().__init__(**lowercase )
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = patch_size
_snake_case = qkv_bias
_snake_case = frequency_stride
_snake_case = time_stride
_snake_case = max_length
_snake_case = num_mel_bins | 686 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = LEDConfig
_UpperCAmelCase : int = {}
_UpperCAmelCase : List[str] = "gelu"
def __init__( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Dict=13 , lowercase : Dict=7 , lowercase : Tuple=True , lowercase : Dict=False , lowercase : Dict=99 , lowercase : Any=32 , lowercase : List[Any]=2 , lowercase : List[str]=4 , lowercase : List[str]=37 , lowercase : Dict=0.1 , lowercase : int=0.1 , lowercase : List[Any]=20 , lowercase : int=2 , lowercase : Optional[Any]=1 , lowercase : List[str]=0 , lowercase : Optional[int]=4 , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = eos_token_id
_snake_case = pad_token_id
_snake_case = bos_token_id
_snake_case = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case = prepare_led_inputs_dict(lowercase , lowercase , lowercase )
_snake_case = tf.concat(
[tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , )
_snake_case = global_attention_mask
return config, inputs_dict
def A ( self : str , lowercase : str , lowercase : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFLEDModel(config=lowercase ).get_decoder()
_snake_case = inputs_dict['input_ids']
_snake_case = input_ids[:1, :]
_snake_case = inputs_dict['attention_mask'][:1, :]
_snake_case = 1
# first forward pass
_snake_case = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_snake_case , _snake_case = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case = model(lowercase , attention_mask=lowercase )[0]
_snake_case = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case = output_from_no_past[:, -3:, random_slice_idx]
_snake_case = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
def a_ ( __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : List[str]=None , __lowercase : str=None , ) -> Union[str, Any]:
if attention_mask is None:
_snake_case = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_UpperCAmelCase : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_UpperCAmelCase : Tuple = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = True
_UpperCAmelCase : List[str] = False
_UpperCAmelCase : str = False
_UpperCAmelCase : List[Any] = False
def A ( self : Any ):
'''simple docstring'''
_snake_case = TFLEDModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = tf.zeros_like(inputs_dict['attention_mask'] )
_snake_case = 2
_snake_case = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_snake_case = True
_snake_case = self.model_tester.seq_length
_snake_case = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase : List[str] ):
_snake_case = outputs.decoder_attentions
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase : List[str] ):
_snake_case = [t.numpy() for t in outputs.encoder_attentions]
_snake_case = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
_snake_case = len(lowercase )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
if self.is_encoder_decoder:
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_decoder_attentions_output(lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
# Check attention is always last and order is fine
_snake_case = True
_snake_case = True
_snake_case = model_class(lowercase )
_snake_case = model(self._prepare_for_class(lowercase , lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) )
self.assertEqual(model.config.output_hidden_states , lowercase )
check_encoder_attentions_output(lowercase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def A ( self : List[Any] ):
'''simple docstring'''
pass
def A ( self : Any ):
'''simple docstring'''
pass
def a_ ( __lowercase : str ) -> Optional[Any]:
return tf.constant(__lowercase , dtype=tf.intaa )
_lowerCamelCase : List[Any] = 1E-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 )
def A ( self : str ):
'''simple docstring'''
_snake_case = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_snake_case = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case = prepare_led_inputs_dict(model.config , lowercase , lowercase )
_snake_case = model(**lowercase )[0]
_snake_case = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase )
# change to expected output here
_snake_case = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 ) | 686 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def a ( A__ ) -> Any:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def a ( A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = np.max(_outputs , axis=-1 , keepdims=A__ )
SCREAMING_SNAKE_CASE__ : List[str] = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=A__ )
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = '''sigmoid'''
lowerCamelCase : int = '''softmax'''
lowerCamelCase : Any = '''none'''
@add_end_docstrings(
_UpperCAmelCase , r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = False
lowerCamelCase : str = ClassificationFunction.NONE
def __init__( self : Dict , **_lowercase : Any ):
super().__init__(**_lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def lowercase__ ( self : Any , _lowercase : Dict=None , _lowercase : str=None , _lowercase : int="" , **_lowercase : Optional[int] ):
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_kwargs
SCREAMING_SNAKE_CASE__ : Any = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
SCREAMING_SNAKE_CASE__ : str = self.model.config.return_all_scores
if isinstance(_lowercase , _lowercase ) or top_k is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = top_k
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , _lowercase , )
if return_all_scores:
SCREAMING_SNAKE_CASE__ : Tuple = None
else:
SCREAMING_SNAKE_CASE__ : int = 1
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
SCREAMING_SNAKE_CASE__ : Tuple = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[int] , *_lowercase : str , **_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Any = super().__call__(*_lowercase , **_lowercase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''top_k''' not in kwargs
if isinstance(args[0] , _lowercase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def lowercase__ ( self : List[Any] , _lowercase : List[Any] , **_lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.framework
if isinstance(_lowercase , _lowercase ):
return self.tokenizer(**_lowercase , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1 and isinstance(inputs[0] , _lowercase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_lowercase , **_lowercase )
elif isinstance(_lowercase , _lowercase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
return self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
def lowercase__ ( self : Union[str, Any] , _lowercase : List[Any] ):
return self.model(**_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Dict , _lowercase : Optional[int]=None , _lowercase : Optional[int]=1 , _lowercase : str=True ):
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
SCREAMING_SNAKE_CASE__ : List[str] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
SCREAMING_SNAKE_CASE__ : Tuple = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
SCREAMING_SNAKE_CASE__ : List[Any] = self.model.config.function_to_apply
else:
SCREAMING_SNAKE_CASE__ : Tuple = ClassificationFunction.NONE
SCREAMING_SNAKE_CASE__ : Tuple = model_outputs['''logits'''][0]
SCREAMING_SNAKE_CASE__ : Tuple = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sigmoid(_lowercase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
SCREAMING_SNAKE_CASE__ : Dict = softmax(_lowercase )
elif function_to_apply == ClassificationFunction.NONE:
SCREAMING_SNAKE_CASE__ : Any = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
SCREAMING_SNAKE_CASE__ : Tuple = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(_lowercase )
]
if not _legacy:
dict_scores.sort(key=lambda _lowercase : x["score"] , reverse=_lowercase )
if top_k is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = dict_scores[:top_k]
return dict_scores
| 250 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self : List[str] , _lowercase : List[Any] , _lowercase : int=13 , _lowercase : Any=7 , _lowercase : Tuple=True , _lowercase : Union[str, Any]=True , _lowercase : List[str]=True , _lowercase : Optional[Any]=True , _lowercase : List[Any]=99 , _lowercase : List[str]=16 , _lowercase : List[Any]=36 , _lowercase : Any=6 , _lowercase : int=6 , _lowercase : str=6 , _lowercase : List[str]=37 , _lowercase : List[Any]="gelu" , _lowercase : Union[str, Any]=0.1 , _lowercase : List[str]=0.1 , _lowercase : List[Any]=5_12 , _lowercase : List[str]=16 , _lowercase : Optional[int]=2 , _lowercase : List[str]=0.02 , _lowercase : Union[str, Any]=3 , _lowercase : Optional[Any]=4 , _lowercase : Any=None , ):
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = seq_length
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : List[str] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : int = embedding_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_hidden_groups
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : str = num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices
SCREAMING_SNAKE_CASE__ : Optional[int] = scope
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple , _lowercase : List[str] , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : Dict = AlbertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ : str = model(_lowercase , token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : Dict , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Any = AlbertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , sentence_order_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowercase__ ( self : str , _lowercase : Tuple , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : List[Any] , _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : List[str] = AlbertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Optional[int] , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Any = AlbertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : str , _lowercase : Optional[Any] , _lowercase : int , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Any = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = AlbertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict , _lowercase : str , _lowercase : Tuple , _lowercase : int , _lowercase : List[Any] , _lowercase : List[str] , _lowercase : Tuple , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = AlbertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : str = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : str , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : List[Any] , _lowercase : int , _lowercase : List[str] , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_choices
SCREAMING_SNAKE_CASE__ : Dict = AlbertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) , (
SCREAMING_SNAKE_CASE__
) ,
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Dict = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase : Tuple = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : str = True
def lowercase__ ( self : Any , _lowercase : Any , _lowercase : Tuple , _lowercase : int=False ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Dict = AlbertModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowercase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = type
self.model_tester.create_and_check_model(*_lowercase )
@slow
def lowercase__ ( self : Union[str, Any] ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[Any] = AlbertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Any = AlbertModel.from_pretrained('''albert-base-v2''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(_lowercase , attention_mask=_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
| 250 | 1 |
__A : Tuple = {str(digit): digit**5 for digit in range(1_0)}
def __a ( A__ : int ):
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(A__ ) )
def __a ( ):
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(A__ ) )
if __name__ == "__main__":
print(solution()) | 16 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_lowerCAmelCase : Any = datasets.utils.logging.get_logger(__name__)
_lowerCAmelCase : Any = ["names", "prefix"]
_lowerCAmelCase : str = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_lowerCAmelCase : List[str] = ["encoding_errors", "on_bad_lines"]
_lowerCAmelCase : int = ["date_format"]
@dataclass
class __snake_case ( datasets.BuilderConfig ):
SCREAMING_SNAKE_CASE__ = ","
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = "infer"
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = "."
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = '"'
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = 10000
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = "strict"
SCREAMING_SNAKE_CASE__ = "error"
SCREAMING_SNAKE_CASE__ = None
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if self.delimiter is not None:
lowerCAmelCase__ = self.delimiter
if self.column_names is not None:
lowerCAmelCase__ = self.column_names
@property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,a_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __snake_case ( datasets.ArrowBasedBuilder ):
SCREAMING_SNAKE_CASE__ = CsvConfig
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
lowerCAmelCase__ = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ ,(str, list, tuple) ):
lowerCAmelCase__ = data_files
if isinstance(a_ ,a_ ):
lowerCAmelCase__ = [files]
lowerCAmelCase__ = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )]
lowerCAmelCase__ = []
for split_name, files in data_files.items():
if isinstance(a_ ,a_ ):
lowerCAmelCase__ = [files]
lowerCAmelCase__ = [dl_manager.iter_files(a_ ) for file in files]
splits.append(datasets.SplitGenerator(name=a_ ,gen_kwargs={'files': files} ) )
return splits
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
if self.config.features is not None:
lowerCAmelCase__ = self.config.features.arrow_schema
if all(not require_storage_cast(a_ ) for feature in self.config.features.values() ):
# cheaper cast
lowerCAmelCase__ = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=a_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowerCAmelCase__ = table_cast(a_ ,a_ )
return pa_table
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowerCAmelCase__ = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(a_ ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
lowerCAmelCase__ = pd.read_csv(a_ ,iterator=a_ ,dtype=a_ ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(a_ ):
lowerCAmelCase__ = pa.Table.from_pandas(a_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a_ )
except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(a_ )}: {e}' )
raise
| 193 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
'''simple docstring'''
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
| 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCAmelCase_ = None
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase_ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
lowerCAmelCase_ = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
lowerCAmelCase_ = '▁'
class UpperCamelCase ( snake_case__ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = AlbertTokenizer
def __init__( self : str ,_SCREAMING_SNAKE_CASE : int=None ,_SCREAMING_SNAKE_CASE : List[str]=None ,_SCREAMING_SNAKE_CASE : List[str]=True ,_SCREAMING_SNAKE_CASE : str=True ,_SCREAMING_SNAKE_CASE : Optional[int]=False ,_SCREAMING_SNAKE_CASE : List[str]="[CLS]" ,_SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" ,_SCREAMING_SNAKE_CASE : Tuple="<unk>" ,_SCREAMING_SNAKE_CASE : Union[str, Any]="[SEP]" ,_SCREAMING_SNAKE_CASE : Tuple="<pad>" ,_SCREAMING_SNAKE_CASE : Any="[CLS]" ,_SCREAMING_SNAKE_CASE : Optional[int]="[MASK]" ,**_SCREAMING_SNAKE_CASE : Any ,) -> Tuple:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
A = (
AddedToken(_SCREAMING_SNAKE_CASE ,lstrip=_SCREAMING_SNAKE_CASE ,rstrip=_SCREAMING_SNAKE_CASE ,normalized=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
else mask_token
)
super().__init__(
_SCREAMING_SNAKE_CASE ,tokenizer_file=_SCREAMING_SNAKE_CASE ,do_lower_case=_SCREAMING_SNAKE_CASE ,remove_space=_SCREAMING_SNAKE_CASE ,keep_accents=_SCREAMING_SNAKE_CASE ,bos_token=_SCREAMING_SNAKE_CASE ,eos_token=_SCREAMING_SNAKE_CASE ,unk_token=_SCREAMING_SNAKE_CASE ,sep_token=_SCREAMING_SNAKE_CASE ,pad_token=_SCREAMING_SNAKE_CASE ,cls_token=_SCREAMING_SNAKE_CASE ,mask_token=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def A( self : int ,_SCREAMING_SNAKE_CASE : List[int] ,_SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A( self : List[Any] ,_SCREAMING_SNAKE_CASE : List[int] ,_SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A( self : Union[str, Any] ,_SCREAMING_SNAKE_CASE : str ,_SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
_SCREAMING_SNAKE_CASE ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file ,_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 110 | 0 |
import numpy
# List of input, output pairs
__magic_name__ = (
((5, 2, 3), 1_5),
((6, 5, 9), 2_5),
((1_1, 1_2, 1_3), 4_1),
((1, 1, 1), 8),
((1_1, 1_2, 1_3), 4_1),
)
__magic_name__ = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
__magic_name__ = [2, 4, 1, 5]
__magic_name__ = len(train_data)
__magic_name__ = 0.0_09
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_="train"):
'''simple docstring'''
return calculate_hypothesis_value(lowerCAmelCase_ , lowerCAmelCase_) - output(
lowerCAmelCase_ , lowerCAmelCase_)
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = 0
for i in range(len(lowerCAmelCase_) - 1):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0])
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0])
return None
def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_=m):
'''simple docstring'''
lowerCamelCase_ : Tuple = 0
for i in range(lowerCAmelCase_):
if index == -1:
summation_value += _error(lowerCAmelCase_)
else:
summation_value += _error(lowerCAmelCase_) * train_data[i][0][index]
return summation_value
def __magic_name__ ( lowerCAmelCase_):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = summation_of_cost_derivative(lowerCAmelCase_ , lowerCAmelCase_) / m
return cost_derivative_value
def __magic_name__ ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase_ : int = 0.00_00_02
lowerCamelCase_ : List[str] = 0
lowerCamelCase_ : Optional[Any] = 0
while True:
j += 1
lowerCamelCase_ : str = [0, 0, 0, 0]
for i in range(0 , len(lowerCAmelCase_)):
lowerCamelCase_ : Optional[Any] = get_cost_derivative(i - 1)
lowerCamelCase_ : List[str] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCAmelCase_ , lowerCAmelCase_ , atol=lowerCAmelCase_ , rtol=lowerCAmelCase_ , ):
break
lowerCamelCase_ : List[Any] = temp_parameter_vector
print(("Number of iterations:", j))
def __magic_name__ ( ):
'''simple docstring'''
for i in range(len(lowerCAmelCase_)):
print(("Actual output value:", output(lowerCAmelCase_ , "test")))
print(("Hypothesis output:", calculate_hypothesis_value(lowerCAmelCase_ , "test")))
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 250 |
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 250 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : str = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=_lowercase , dtype=jnp.bfloataa )
snake_case_ : Tuple = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=_lowercase , from_pt=_lowercase , dtype=jnp.bfloataa )
snake_case_ : Tuple = controlnet_params
snake_case_ : int = """bird"""
snake_case_ : Any = jax.device_count()
snake_case_ : List[str] = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
snake_case_ : Any = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case_ : List[Any] = jax.random.PRNGKey(0 )
snake_case_ : int = jax.random.split(_lowercase , jax.device_count() )
snake_case_ : List[str] = replicate(_lowercase )
snake_case_ : Optional[int] = shard(_lowercase )
snake_case_ : Optional[Any] = shard(_lowercase )
snake_case_ : str = pipe(
prompt_ids=_lowercase , image=_lowercase , params=_lowercase , prng_seed=_lowercase , num_inference_steps=5_0 , jit=_lowercase , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
snake_case_ : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ : Dict = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
snake_case_ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ : Dict = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=_lowercase , dtype=jnp.bfloataa )
snake_case_ : Dict = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=_lowercase , from_pt=_lowercase , dtype=jnp.bfloataa )
snake_case_ : str = controlnet_params
snake_case_ : List[str] = """Chef in the kitchen"""
snake_case_ : int = jax.device_count()
snake_case_ : int = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
snake_case_ : Tuple = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case_ : Tuple = jax.random.PRNGKey(0 )
snake_case_ : Optional[Any] = jax.random.split(_lowercase , jax.device_count() )
snake_case_ : Dict = replicate(_lowercase )
snake_case_ : Union[str, Any] = shard(_lowercase )
snake_case_ : Optional[int] = shard(_lowercase )
snake_case_ : Dict = pipe(
prompt_ids=_lowercase , image=_lowercase , params=_lowercase , prng_seed=_lowercase , num_inference_steps=5_0 , jit=_lowercase , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
snake_case_ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case_ : Dict = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
snake_case_ : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case_ : Tuple = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 704 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : int = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : List[Any] = tf.cast(math.pi , x.dtype )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(__UpperCamelCase , 3 )) ))
return x * cdf
def __lowerCAmelCase ( __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : Optional[Any] = tf.convert_to_tensor(__UpperCamelCase )
return x * tf.tanh(tf.math.softplus(__UpperCamelCase ) )
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
snake_case_ : str = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : int = tf.cast(0.044_715 , x.dtype )
snake_case_ : Optional[int] = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __lowerCAmelCase ( __UpperCamelCase : List[str] ):
'''simple docstring'''
snake_case_ : Tuple = tf.convert_to_tensor(__UpperCamelCase )
snake_case_ : str = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
return tf.clip_by_value(_gelu(__UpperCamelCase ) , -1_0 , 1_0 )
def __lowerCAmelCase ( __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str]=-1 ):
'''simple docstring'''
snake_case_ , snake_case_ : List[Any] = tf.split(__UpperCamelCase , 2 , axis=__UpperCamelCase )
return a * tf.math.sigmoid(__UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def __lowerCAmelCase ( __UpperCamelCase : List[Any] ):
'''simple docstring'''
return tf.keras.activations.gelu(__UpperCamelCase , approximate=__UpperCamelCase )
__lowerCAmelCase : int = tf.keras.activations.gelu
__lowerCAmelCase : Optional[Any] = approximate_gelu_wrap
else:
__lowerCAmelCase : List[Any] = _gelu
__lowerCAmelCase : Any = _gelu_new
__lowerCAmelCase : Dict = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def __lowerCAmelCase ( __UpperCamelCase : Any ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
| 21 | 0 |
"""simple docstring"""
def A_ ( lowercase ) -> str:
"""simple docstring"""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : Any = (1 << p) - 1
for _ in range(p - 2 ):
UpperCAmelCase_ : List[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 470 |
"""simple docstring"""
from __future__ import annotations
_snake_case = [True] * 1_0_0_0_0_0_1
_snake_case = 2
while i * i <= 1_0_0_0_0_0_0:
if seive[i]:
for j in range(i * i, 1_0_0_0_0_0_1, i):
_snake_case = False
i += 1
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
return seive[n]
def __snake_case ( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
return any(digit in '02468' for digit in str(SCREAMING_SNAKE_CASE ) )
def __snake_case ( SCREAMING_SNAKE_CASE: int = 100_0000 ):
"""simple docstring"""
_lowerCAmelCase = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(SCREAMING_SNAKE_CASE ) and not contains_an_even_digit(SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = str(SCREAMING_SNAKE_CASE )
_lowerCAmelCase = [int(str_num[j:] + str_num[:j] ) for j in range(len(SCREAMING_SNAKE_CASE ) )]
if all(is_prime(SCREAMING_SNAKE_CASE ) for i in list_nums ):
result.append(SCREAMING_SNAKE_CASE )
return result
def __snake_case ( ):
"""simple docstring"""
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 580 | 0 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowercase_ ):
def __init__( self : List[str] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Tuple ) -> None:
'''simple docstring'''
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 703 |
from __future__ import annotations
def _A ( _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase : list[list[int]] = []
_UpperCAmelCase : list[int] = []
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : str = sum(_UpperCamelCase )
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return result
def _A ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
if sum(_UpperCamelCase ) > max_sum or (remaining_nums_sum + sum(_UpperCamelCase )) < max_sum:
return
if sum(_UpperCamelCase ) == max_sum:
result.append(_UpperCamelCase )
return
for index in range(_UpperCamelCase , len(_UpperCamelCase ) ):
create_state_space_tree(
_UpperCamelCase , _UpperCamelCase , index + 1 , [*path, nums[index]] , _UpperCamelCase , remaining_nums_sum - nums[index] , )
UpperCAmelCase__ : List[Any] = [3, 34, 4, 12, 5, 2]
UpperCAmelCase__ : str = 9
UpperCAmelCase__ : Tuple = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 416 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCamelCase__ = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 547 | import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase_ : UNetaDModel , lowercase_ : UNetaDModel , lowercase_ : DDPMScheduler , lowercase_ : Any , ) -> Any:
"""simple docstring"""
super().__init__()
_UpperCamelCase = value_function
_UpperCamelCase = unet
_UpperCamelCase = scheduler
_UpperCamelCase = env
_UpperCamelCase = env.get_dataset()
_UpperCamelCase = {}
for key in self.data.keys():
try:
_UpperCamelCase = self.data[key].mean()
except: # noqa: E722
pass
_UpperCamelCase = {}
for key in self.data.keys():
try:
_UpperCamelCase = self.data[key].std()
except: # noqa: E722
pass
_UpperCamelCase = env.observation_space.shape[0]
_UpperCamelCase = env.action_space.shape[0]
def __UpperCAmelCase ( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any]) -> Tuple:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def __UpperCAmelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if type(lowercase_) is dict:
return {k: self.to_torch(lowercase_) for k, v in x_in.items()}
elif torch.is_tensor(lowercase_):
return x_in.to(self.unet.device)
return torch.tensor(lowercase_ , device=self.unet.device)
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str]) -> Tuple:
"""simple docstring"""
for key, val in cond.items():
_UpperCamelCase = val.clone()
return x_in
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : int) -> Dict:
"""simple docstring"""
_UpperCamelCase = x.shape[0]
_UpperCamelCase = None
for i in tqdm.tqdm(self.scheduler.timesteps):
# create batch of timesteps to pass into model
_UpperCamelCase = torch.full((batch_size,) , lowercase_ , device=self.unet.device , dtype=torch.long)
for _ in range(lowercase_):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_UpperCamelCase = self.value_function(x.permute(0 , 2 , 1) , lowercase_).sample
_UpperCamelCase = torch.autograd.grad([y.sum()] , [x])[0]
_UpperCamelCase = self.scheduler._get_variance(lowercase_)
_UpperCamelCase = torch.exp(0.5 * posterior_variance)
_UpperCamelCase = model_std * grad
_UpperCamelCase = 0
_UpperCamelCase = x.detach()
_UpperCamelCase = x + scale * grad
_UpperCamelCase = self.reset_xa(lowercase_ , lowercase_ , self.action_dim)
_UpperCamelCase = self.unet(x.permute(0 , 2 , 1) , lowercase_).sample.permute(0 , 2 , 1)
# TODO: verify deprecation of this kwarg
_UpperCamelCase = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , predict_epsilon=lowercase_)["prev_sample"]
# apply conditions to the trajectory (set the initial state)
_UpperCamelCase = self.reset_xa(lowercase_ , lowercase_ , self.action_dim)
_UpperCamelCase = self.to_torch(lowercase_)
return x, y
def __call__( self : Optional[int] , lowercase_ : str , lowercase_ : int=64 , lowercase_ : Any=32 , lowercase_ : List[Any]=2 , lowercase_ : str=0.1) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = self.normalize(lowercase_ , "observations")
_UpperCamelCase = obs[None].repeat(lowercase_ , axis=0)
_UpperCamelCase = {0: self.to_torch(lowercase_)}
_UpperCamelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_UpperCamelCase = randn_tensor(lowercase_ , device=self.unet.device)
_UpperCamelCase = self.reset_xa(lowercase_ , lowercase_ , self.action_dim)
_UpperCamelCase = self.to_torch(lowercase_)
# run the diffusion process
_UpperCamelCase , _UpperCamelCase = self.run_diffusion(lowercase_ , lowercase_ , lowercase_ , lowercase_)
# sort output trajectories by value
_UpperCamelCase = y.argsort(0 , descending=lowercase_).squeeze()
_UpperCamelCase = x[sorted_idx]
_UpperCamelCase = sorted_values[:, :, : self.action_dim]
_UpperCamelCase = actions.detach().cpu().numpy()
_UpperCamelCase = self.de_normalize(lowercase_ , key="actions")
# select the action with the highest value
if y is not None:
_UpperCamelCase = 0
else:
# if we didn't run value guiding, select a random action
_UpperCamelCase = np.random.randint(0 , lowercase_)
_UpperCamelCase = denorm_actions[selected_index, 0]
return denorm_actions
| 547 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMTokenizer
_SCREAMING_SNAKE_CASE : str = LayoutLMTokenizerFast
_SCREAMING_SNAKE_CASE : int = True
_SCREAMING_SNAKE_CASE : Optional[int] = True
def a ( self : Tuple ) -> List[str]:
super().setUp()
__lowerCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def a ( self : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[Any]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
__lowerCAmelCase = """UNwant\u00E9d,running"""
__lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def a ( self : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [7, 4, 5, 10, 8, 9] )
def a ( self : Union[str, Any] ) -> Any:
pass
| 330 | '''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_A : List[str] = logging.get_logger(__name__)
_A : List[Any] = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
_A : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_A : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = """whisper"""
_SCREAMING_SNAKE_CASE : List[Any] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE : Optional[int] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_18_65 , SCREAMING_SNAKE_CASE__ : List[str]=80 , SCREAMING_SNAKE_CASE__ : Tuple=6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Optional[Any]=15_36 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=5_02_57 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=2_56 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=15_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=4_48 , SCREAMING_SNAKE_CASE__ : Dict=5_02_56 , SCREAMING_SNAKE_CASE__ : List[str]=5_02_56 , SCREAMING_SNAKE_CASE__ : List[str]=5_02_56 , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : int=[2_20, 5_02_56] , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Tuple=2_56 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=10 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Tuple=7 , **SCREAMING_SNAKE_CASE__ : int , ) -> Dict:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = num_mel_bins
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = max_source_positions
__lowerCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
__lowerCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = apply_spec_augment
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
__lowerCAmelCase = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , suppress_tokens=SCREAMING_SNAKE_CASE__ , begin_suppress_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def a ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
__lowerCAmelCase = {0: """batch"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction="""inputs""" )
return common_inputs
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 2_20_50 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 2_20 , ) -> Mapping[str, Any]:
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , time_duration=SCREAMING_SNAKE_CASE__ , frequency=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = encoder_inputs["""input_features"""].shape[2]
__lowerCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = encoder_inputs.pop("""input_features""" )
__lowerCAmelCase = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
__lowerCAmelCase = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def a ( self : Optional[int] ) -> float:
return 1e-3
| 330 | 1 |
'''simple docstring'''
import argparse
import os
import re
UpperCAmelCase_ : Optional[int] = '''src/transformers'''
# Pattern that looks at the indentation in a line.
UpperCAmelCase_ : Tuple = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCAmelCase_ : List[Any] = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCAmelCase_ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCAmelCase_ : Any = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCAmelCase_ : List[Any] = re.compile(R'''\[([^\]]+)\]''')
def _UpperCamelCase (_lowerCamelCase : Dict )-> Dict:
'''simple docstring'''
__snake_case = _re_indent.search(lowerCAmelCase__ )
return "" if search is None else search.groups()[0]
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : int="" , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[str]=None )-> Optional[int]:
'''simple docstring'''
__snake_case = 0
__snake_case = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase__ ):
index += 1
__snake_case = ['''\n'''.join(lines[:index] )]
else:
__snake_case = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__snake_case = [lines[index]]
index += 1
while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
if index < len(lowerCAmelCase__ ) - 1:
__snake_case = [lines[index + 1]]
index += 1
else:
__snake_case = []
else:
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
__snake_case = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase__ ) > 0:
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _UpperCamelCase (_lowerCamelCase : List[Any] )-> List[Any]:
'''simple docstring'''
def _inner(_lowerCamelCase : Tuple ):
return key(lowerCAmelCase__ ).lower().replace('''_''' , '''''' )
return _inner
def _UpperCamelCase (_lowerCamelCase : Tuple , _lowerCamelCase : List[str]=None )-> Optional[int]:
'''simple docstring'''
def noop(_lowerCamelCase : Dict ):
return x
if key is None:
__snake_case = noop
# Constants are all uppercase, they go first.
__snake_case = [obj for obj in objects if key(lowerCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__snake_case = [obj for obj in objects if key(lowerCAmelCase__ )[0].isupper() and not key(lowerCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
__snake_case = [obj for obj in objects if not key(lowerCAmelCase__ )[0].isupper()]
__snake_case = ignore_underscore(lowerCAmelCase__ )
return sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ )
def _UpperCamelCase (_lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
def _replace(_lowerCamelCase : Tuple ):
__snake_case = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
__snake_case = [part.strip().replace('''\"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__snake_case = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(lowerCAmelCase__ )] ) + "]"
__snake_case = import_statement.split('''\n''' )
if len(lowerCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__snake_case = 2 if lines[1].strip() == '''[''' else 1
__snake_case = [(i, _re_strip_line.search(lowerCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__snake_case = sort_objects(lowerCAmelCase__ , key=lambda _lowerCamelCase : x[1] )
__snake_case = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__snake_case = _re_bracket_content.sub(_replace , lines[1] )
else:
__snake_case = [part.strip().replace('''\"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__snake_case = keys[:-1]
__snake_case = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(lowerCAmelCase__ )] )
return "\n".join(lowerCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
__snake_case = _re_bracket_content.sub(_replace , lowerCAmelCase__ )
return import_statement
def _UpperCamelCase (_lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str]=True )-> str:
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
__snake_case = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__snake_case = split_code_in_indented_blocks(
lowerCAmelCase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__snake_case = main_blocks[block_idx]
__snake_case = block.split('''\n''' )
# Get to the start of the imports.
__snake_case = 0
while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__snake_case = len(lowerCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__snake_case = '''\n'''.join(block_lines[line_idx:-1] )
__snake_case = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
__snake_case = split_code_in_indented_blocks(lowerCAmelCase__ , indent_level=lowerCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__snake_case = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__snake_case = [(pattern.search(lowerCAmelCase__ ).groups()[0] if pattern.search(lowerCAmelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__snake_case = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
__snake_case = [x[0] for x in sorted(lowerCAmelCase__ , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
__snake_case = 0
__snake_case = []
for i in range(len(lowerCAmelCase__ ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
__snake_case = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(lowerCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
__snake_case = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase__ ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCAmelCase__ ) )
def _UpperCamelCase (_lowerCamelCase : Tuple=True )-> List[Any]:
'''simple docstring'''
__snake_case = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
__snake_case = sort_imports(os.path.join(lowerCAmelCase__ , '''__init__.py''' ) , check_only=lowerCAmelCase__ )
if result:
__snake_case = [os.path.join(lowerCAmelCase__ , '''__init__.py''' )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(f'''Would overwrite {len(lowerCAmelCase__ )} files, run `make style`.''' )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 24 |
import math
def a (lowerCAmelCase__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a (lowerCAmelCase__ = 10_001 ):
try:
__a = int(lowerCAmelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
if nth <= 0:
raise ValueError("""Parameter nth must be greater than or equal to one.""" )
__a = []
__a = 2
while len(lowerCAmelCase__ ) < nth:
if is_prime(lowerCAmelCase__ ):
primes.append(lowerCAmelCase__ )
num += 1
else:
num += 1
return primes[len(lowerCAmelCase__ ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 99 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowerCAmelCase__ = TypeVar("T")
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =None
_lowercase =len(lowerCAmelCase_ )
_lowercase =[any_type for _ in range(self.N )] + arr
_lowercase =fnc
self.build()
def __lowerCAmelCase ( self ):
for p in range(self.N - 1 , 0 , -1 ):
_lowercase =self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
p += self.N
_lowercase =v
while p > 1:
_lowercase =p // 2
_lowercase =self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ): # noqa: E741
_lowercase , _lowercase =l + self.N, r + self.N
_lowercase =None
while l <= r:
if l % 2 == 1:
_lowercase =self.st[l] if res is None else self.fn(lowerCAmelCase_ , self.st[l] )
if r % 2 == 0:
_lowercase =self.st[r] if res is None else self.fn(lowerCAmelCase_ , self.st[r] )
_lowercase , _lowercase =(l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowerCAmelCase__ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowerCAmelCase__ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowerCAmelCase__ = SegmentTree(test_array, min)
lowerCAmelCase__ = SegmentTree(test_array, max)
lowerCAmelCase__ = SegmentTree(test_array, lambda a, b: a + b)
def __lowerCamelCase ( ) -> None:
for i in range(len(__a ) ):
for j in range(__a , len(__a ) ):
_lowercase =reduce(__a , test_array[i : j + 1] )
_lowercase =reduce(__a , test_array[i : j + 1] )
_lowercase =reduce(lambda __a , __a : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__a , __a )
assert max_range == max_segment_tree.query(__a , __a )
assert sum_range == sum_segment_tree.query(__a , __a )
test_all_segments()
for index, value in test_updates.items():
lowerCAmelCase__ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 594 | import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCAmelCase__ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowerCAmelCase__ = {"facebook/blenderbot-3B": 1_2_8}
class _a ( lowerCamelCase_ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE = BlenderbotTokenizer
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="replace" , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=False , lowerCAmelCase_=True , **lowerCAmelCase_ , ):
super().__init__(
lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , )
_lowercase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCAmelCase_ ) != add_prefix_space:
_lowercase =getattr(lowerCAmelCase_ , pre_tok_state.pop("type" ) )
_lowercase =add_prefix_space
_lowercase =pre_tok_class(**lowerCAmelCase_ )
_lowercase =add_prefix_space
_lowercase ="post_processor"
_lowercase =getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
if tokenizer_component_instance:
_lowercase =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowercase =tuple(state["sep"] )
if "cls" in state:
_lowercase =tuple(state["cls"] )
_lowercase =False
if state.get("add_prefix_space" , lowerCAmelCase_ ) != add_prefix_space:
_lowercase =add_prefix_space
_lowercase =True
if state.get("trim_offsets" , lowerCAmelCase_ ) != trim_offsets:
_lowercase =trim_offsets
_lowercase =True
if changes_to_apply:
_lowercase =getattr(lowerCAmelCase_ , state.pop("type" ) )
_lowercase =component_class(**lowerCAmelCase_ )
setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __lowerCAmelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
_lowercase =AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value
_lowercase =value
def __lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
_lowercase =kwargs.get("is_split_into_words" , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
_lowercase =kwargs.get("is_split_into_words" , lowerCAmelCase_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
_lowercase =self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ )
return tuple(lowerCAmelCase_ )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
_lowercase =[self.sep_token_id]
_lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
return token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
_lowercase =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase_ )
_lowercase =" ".join(lowerCAmelCase_ )
_lowercase =self.encode(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > self.model_max_length:
_lowercase =input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 594 | 1 |
def _lowerCAmelCase ( _lowerCAmelCase ) -> Any:
'''simple docstring'''
if len(a__ ) <= 1:
return [tuple(a__ )]
__snake_case = []
def generate(_lowerCAmelCase , _lowerCAmelCase ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , a__ )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
__snake_case , __snake_case = arr[k - 1], arr[i]
else: # k is odd
__snake_case , __snake_case = arr[k - 1], arr[0]
generate(k - 1 , a__ )
generate(len(a__ ) , a__ )
return res
if __name__ == "__main__":
A : int = input('Enter numbers separated by a comma:\n').strip()
A : Optional[Any] = [int(item) for item in user_input.split(',')]
print(heaps(arr))
| 371 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def lowerCamelCase( a__ ,a__):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : List[Any] , __A : List[Any] , __A : Optional[int] ) -> Dict:
"""simple docstring"""
a_ : Optional[Any] = multiprocessing.Manager()
a_ : Tuple = manager.list()
a_ : Optional[int] = multiprocessing.Process(target=__A , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : Optional[Any] , __A : Optional[Any] ) -> Dict:
"""simple docstring"""
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
a_ : Any = shutil.rmtree
a_ : List[str] = os.rmdir
a_ : List[Any] = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
a_ : Any = {}
with swallow_io():
with time_limit(__A ):
exec(__A , __A )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"""failed: {e}""" )
# Needed for cleaning up.
a_ : Tuple = rmtree
a_ : Any = rmdir
a_ : Optional[int] = chdir
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> str:
"""simple docstring"""
def signal_handler(__A : List[str] , __A : Dict ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , __A )
signal.signal(signal.SIGALRM , __A )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
"""simple docstring"""
a_ : int = WriteOnlyStringIO()
with contextlib.redirect_stdout(__A ):
with contextlib.redirect_stderr(__A ):
with redirect_stdin(__A ):
yield
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as dirname:
with chdir(__A ):
yield dirname
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
pass
class SCREAMING_SNAKE_CASE__ ( io.StringIO ):
def SCREAMING_SNAKE_CASE ( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
raise OSError
def SCREAMING_SNAKE_CASE ( self : Tuple , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]:
raise OSError
def SCREAMING_SNAKE_CASE ( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
raise OSError
def SCREAMING_SNAKE_CASE ( self : Any , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
return False
class SCREAMING_SNAKE_CASE__ ( contextlib._RedirectStream ): # type: ignore
snake_case__ : Optional[int] = '''stdin'''
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if root == ".":
yield
return
a_ : str = os.getcwd()
os.chdir(__A )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__A )
def SCREAMING_SNAKE_CASE_ ( __A : Dict=None ) -> Dict:
"""simple docstring"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
a_ : str = None
a_ : str = None
import os
a_ : Optional[int] = '1'
a_ : Any = None
a_ : int = None
a_ : int = None
a_ : Tuple = None
a_ : Dict = None
a_ : Any = None
a_ : Dict = None
a_ : Dict = None
a_ : List[str] = None
a_ : Optional[Any] = None
a_ : Tuple = None
a_ : Union[str, Any] = None
a_ : Dict = None
a_ : Dict = None
a_ : int = None
a_ : str = None
a_ : Union[str, Any] = None
a_ : str = None
a_ : Optional[int] = None
a_ : List[Any] = None
a_ : Union[str, Any] = None
a_ : Dict = None
a_ : Tuple = None
a_ : List[str] = None
a_ : str = None
a_ : str = None
a_ : str = None
import shutil
a_ : Dict = None
a_ : Tuple = None
a_ : str = None
import subprocess
a_ : str = None # type: ignore
a_ : List[Any] = None
import sys
a_ : Optional[int] = None
a_ : int = None
a_ : Any = None
a_ : str = None
a_ : str = None
| 443 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : int = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE__ :
snake_case__ : Union[str, Any] = PegasusConfig
snake_case__ : int = {}
snake_case__ : str = '''gelu'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple=1_3 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : int=9_9 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_7 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=2_0 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=0 , ) -> Optional[Any]:
a_ : List[Any] = parent
a_ : Optional[int] = batch_size
a_ : str = seq_length
a_ : Any = is_training
a_ : str = use_labels
a_ : Union[str, Any] = vocab_size
a_ : List[str] = hidden_size
a_ : Any = num_hidden_layers
a_ : Tuple = num_attention_heads
a_ : int = intermediate_size
a_ : Union[str, Any] = hidden_dropout_prob
a_ : Optional[Any] = attention_probs_dropout_prob
a_ : Union[str, Any] = max_position_embeddings
a_ : List[str] = eos_token_id
a_ : Optional[Any] = pad_token_id
a_ : List[Any] = bos_token_id
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
a_ : List[str] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
a_ : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
a_ : Dict = np.concatenate([input_ids, eos_tensor] , axis=1 )
a_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ : str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
a_ : int = prepare_pegasus_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
a_ : List[str] = 2_0
a_ : Optional[Any] = model_class_name(SCREAMING_SNAKE_CASE__ )
a_ : Any = model.encode(inputs_dict['input_ids'] )
a_ , a_ : Optional[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
a_ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
a_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
a_ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
a_ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
a_ : List[Any] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
a_ : List[str] = 2_0
a_ : Tuple = model_class_name(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = model.encode(inputs_dict['input_ids'] )
a_ , a_ : Optional[int] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
a_ : int = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
a_ : str = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a_ : List[str] = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
a_ : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
a_ : List[Any] = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
a_ : Optional[Any] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ )
a_ : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] , __A : Any , __A : List[str] , __A : Any=None , __A : Any=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
a_ : Optional[int] = np.not_equal(__A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
a_ : Optional[int] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class SCREAMING_SNAKE_CASE__ ( lowercase__ , unittest.TestCase ):
snake_case__ : Any = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
snake_case__ : str = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
snake_case__ : List[Any] = True
snake_case__ : Optional[Any] = False
snake_case__ : Optional[int] = False
snake_case__ : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
a_ : List[str] = FlaxPegasusModelTester(self )
a_ : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ , a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
a_ , a_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
a_ , a_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a_ : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
@jax.jit
def encode_jitted(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=None , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
with self.subTest('JIT Enabled' ):
a_ : Optional[int] = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
a_ : Any = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a_ : int = model_class(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
a_ : Dict = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ):
return model.decode(
decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , )
with self.subTest('JIT Enabled' ):
a_ : Dict = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
a_ : List[str] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
for model_class_name in self.all_model_classes:
a_ : int = model_class_name.from_pretrained('google/pegasus-large' , from_pt=SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = np.ones((1, 1) )
a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
a_ : Optional[Any] = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
a_ : str = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
a_ : Optional[Any] = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
a_ : Dict = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
a_ : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='np' , truncation=SCREAMING_SNAKE_CASE__ , max_length=5_1_2 , padding=SCREAMING_SNAKE_CASE__ )
a_ : Tuple = model.generate(**SCREAMING_SNAKE_CASE__ , num_beams=2 ).sequences
a_ : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
assert tgt_text == decoded
| 443 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
_A = logging.get_logger(__name__)
class _lowerCamelCase ( a_ ):
def __init__( self : int , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ) -> None:
"""simple docstring"""
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , UpperCamelCase , )
super().__init__(*UpperCamelCase , **UpperCamelCase )
| 299 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
lowerCAmelCase__ , lowerCAmelCase__ : str = get_aligned_output_features_output_indices(UpperCamelCase , UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""c"""] )
self.assertEqual(UpperCamelCase , [2] )
# Out indices set to match out features
lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(["""a""", """c"""] , UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""a""", """c"""] )
self.assertEqual(UpperCamelCase , [0, 2] )
# Out features set to match out indices
lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(UpperCamelCase , [0, 2] , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""a""", """c"""] )
self.assertEqual(UpperCamelCase , [0, 2] )
# Out features selected from negative indices
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = get_aligned_output_features_output_indices(UpperCamelCase , [-3, -1] , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""a""", """c"""] )
self.assertEqual(UpperCamelCase , [-3, -1] )
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
# Stage names must be set
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , UpperCamelCase )
# Out features must be a list
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(UpperCamelCase , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(UpperCamelCase , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(UpperCamelCase ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : str = BackboneMixin()
lowerCAmelCase__ : str = ["""a""", """b""", """c"""]
lowerCAmelCase__ : List[str] = ["""a""", """c"""]
lowerCAmelCase__ : Union[str, Any] = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowerCAmelCase__ : List[str] = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowerCAmelCase__ : int = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 299 | 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ : Tuple = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase ( lowerCamelCase_ :List[Any] ):
'''simple docstring'''
snake_case_ : Dict = emb.weight.shape
snake_case_ : str = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ )
snake_case_ : str = emb.weight.data
return lin_layer
def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str]=None ):
'''simple docstring'''
snake_case_ : Union[str, Any] = {}
for old_key in state_dict.keys():
snake_case_ : Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
snake_case_ : Dict = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' )
else:
snake_case_ : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
snake_case_ : List[str] = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
snake_case_ : Any = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
snake_case_ : str = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
snake_case_ : str = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
snake_case_ : List[str] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
snake_case_ : Dict = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
snake_case_ : Dict = state_dict[old_key]
return new_dict
def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :str = WEIGHTS_NAME ):
'''simple docstring'''
snake_case_ : Tuple = []
snake_case_ : Dict = 0
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
for expert in range(lowerCamelCase_ ):
snake_case_ : Optional[Any] = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(lowerCamelCase_ ):
snake_case_ : List[Any] = torch.load(lowerCamelCase_ )["""model"""]
remove_ignore_keys_(lowerCamelCase_ )
snake_case_ : List[str] = rename_fairseq_keys(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : List[str] = os.path.join(
lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(lowerCamelCase_ )[0]].dtype )
# Add the last block
snake_case_ : Tuple = os.path.join(lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
snake_case_ : Tuple = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(lowerCamelCase_ )
snake_case_ : Tuple = rename_fairseq_keys(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(lowerCamelCase_ ) == 1:
snake_case_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
torch.save(lowerCamelCase_ , lowerCamelCase_ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(lowerCamelCase_ , lowerCamelCase_ )
# Otherwise, let's build the index
snake_case_ : str = {}
for idx, shard in enumerate(lowerCamelCase_ ):
snake_case_ : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin''' )
snake_case_ : Optional[int] = os.path.join(lowerCamelCase_ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
for key in shard:
snake_case_ : Optional[int] = shard_file
# Add the metadata
snake_case_ : Any = {"""total_size""": total_size}
snake_case_ : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , """w""" , encoding="""utf-8""" ) as f:
snake_case_ : List[str] = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + """\n"""
f.write(lowerCamelCase_ )
return metadata, index
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
__A : List[str] = parser.parse_args()
__A : List[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__A : Tuple = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__A : List[str] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path) | 701 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __UpperCamelCase :
def __init__( self :Union[str, Any] ,_UpperCamelCase :int ,_UpperCamelCase :str=1_3 ,_UpperCamelCase :Tuple=7 ,_UpperCamelCase :Tuple=False ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :Union[str, Any]=False ,_UpperCamelCase :int=True ,_UpperCamelCase :List[str]=3_3 ,_UpperCamelCase :Any=3_2 ,_UpperCamelCase :Any=5 ,_UpperCamelCase :List[str]=4 ,_UpperCamelCase :Tuple=3_7 ,_UpperCamelCase :Optional[Any]="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :List[Any]=0.1 ,_UpperCamelCase :Any=5_1_2 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :Any=2 ,_UpperCamelCase :Optional[Any]=0.02 ,_UpperCamelCase :List[str]=3 ,_UpperCamelCase :Union[str, Any]=4 ,_UpperCamelCase :Dict=None ,):
snake_case_ : Tuple = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : Any = is_training
snake_case_ : List[Any] = use_input_mask
snake_case_ : int = use_token_type_ids
snake_case_ : Optional[int] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Dict = hidden_size
snake_case_ : Optional[int] = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Optional[int] = max_position_embeddings
snake_case_ : List[str] = type_vocab_size
snake_case_ : int = type_sequence_label_size
snake_case_ : Dict = initializer_range
snake_case_ : Any = num_labels
snake_case_ : Any = num_choices
snake_case_ : Tuple = scope
def a__ ( self :str ):
snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case_ : str = None
if self.use_input_mask:
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Dict = None
snake_case_ : List[str] = None
snake_case_ : Tuple = None
if self.use_labels:
snake_case_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case_ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self :Optional[Any] ):
return EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
def a__ ( self :str ,_UpperCamelCase :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Optional[int] ):
snake_case_ : Union[str, Any] = EsmModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : int = model(_UpperCamelCase ,attention_mask=_UpperCamelCase )
snake_case_ : str = model(_UpperCamelCase )
snake_case_ : Optional[int] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def a__ ( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Dict ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Optional[int] ):
snake_case_ : str = EsmForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : Dict = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Any ):
snake_case_ : List[Any] = self.num_labels
snake_case_ : int = EsmForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ : int = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self :Union[str, Any] ):
snake_case_ : Dict = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) : Optional[int] = config_and_inputs
snake_case_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Optional[int] = False
lowercase : List[str] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase : int = ()
lowercase : List[str] = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : str = True
def a__ ( self :Any ):
snake_case_ : Any = EsmModelTester(self )
snake_case_ : str = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 )
def a__ ( self :Optional[Any] ):
self.config_tester.run_common_tests()
def a__ ( self :Optional[Any] ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :Dict ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : int = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :Dict ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def a__ ( self :List[str] ):
snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def a__ ( self :Union[str, Any] ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = EsmModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def a__ ( self :Tuple ):
snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()[0]
snake_case_ : Optional[int] = EsmEmbeddings(config=_UpperCamelCase )
snake_case_ : List[Any] = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
snake_case_ : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
snake_case_ : Optional[Any] = create_position_ids_from_input_ids(_UpperCamelCase ,model.padding_idx )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_UpperCamelCase ,_UpperCamelCase ) ) )
def a__ ( self :List[Any] ):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
snake_case_ : List[Any] = EsmEmbeddings(config=_UpperCamelCase )
snake_case_ : Dict = torch.empty(2 ,4 ,3_0 )
snake_case_ : List[Any] = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
snake_case_ : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
snake_case_ : str = embeddings.create_position_ids_from_inputs_embeds(_UpperCamelCase )
self.assertEqual(position_ids.shape ,expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_UpperCamelCase ,_UpperCamelCase ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self :Optional[Any] ):
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self :Optional[int] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self :Optional[int] ):
pass
@require_torch
class __UpperCamelCase ( lowercase__ ):
@slow
def a__ ( self :Any ):
with torch.no_grad():
snake_case_ : Optional[Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
snake_case_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Dict = model(_UpperCamelCase )[0]
snake_case_ : Optional[int] = 3_3
snake_case_ : List[Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape ,_UpperCamelCase )
snake_case_ : Union[str, Any] = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_UpperCamelCase ,atol=1E-4 ) )
@slow
def a__ ( self :List[Any] ):
with torch.no_grad():
snake_case_ : List[Any] = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
snake_case_ : str = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
snake_case_ : Optional[Any] = model(_UpperCamelCase )[0]
# compare the actual values for a slice.
snake_case_ : List[str] = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_UpperCamelCase ,atol=1E-4 ) ) | 267 | 0 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
__snake_case : List[Any] = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__snake_case : int = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
__snake_case : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : List[Any] ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def A__ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : List[Any] , _lowerCamelCase : int = CHRF.CHAR_ORDER , _lowerCamelCase : int = CHRF.WORD_ORDER , _lowerCamelCase : int = CHRF.BETA , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , _lowerCamelCase : bool = False , ):
A__ = len(references[0] )
if any(len(lowercase__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
A__ = [[refs[i] for refs in references] for i in range(lowercase__ )]
A__ = CHRF(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
A__ = sb_chrf.corpus_score(lowercase__ , lowercase__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 571 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 0 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( _a : int , _a : Any , _a : Union[str, Any] , _a : Tuple ):
'''simple docstring'''
snake_case__ : Any =BigBirdConfig.from_json_file(_a )
print(f"Building PyTorch model from configuration: {config}" )
if is_trivia_qa:
snake_case__ : str =BigBirdForQuestionAnswering(_a )
else:
snake_case__ : Optional[int] =BigBirdForPreTraining(_a )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(_a , _a , is_trivia_qa=_a )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_a )
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 702 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Any = {"""vocab_file""": """spiece.model"""}
__lowerCamelCase : Optional[Any] = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
__lowerCamelCase : List[str] = {
"""google/bigbird-roberta-base""": 40_96,
"""google/bigbird-roberta-large""": 40_96,
"""google/bigbird-base-trivia-itc""": 40_96,
}
class _lowercase ( _A ):
_a : int = VOCAB_FILES_NAMES
_a : List[str] = PRETRAINED_VOCAB_FILES_MAP
_a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Tuple = ['input_ids', 'attention_mask']
_a : List[int] = []
def __init__( self , a , a="<unk>" , a="<s>" , a="</s>" , a="<pad>" , a="[SEP]" , a="[MASK]" , a="[CLS]" , a = None , **a , ):
snake_case__ : str =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
snake_case__ : Optional[int] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
snake_case__ : Any =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
snake_case__ : List[str] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
snake_case__ : int =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
snake_case__ : Tuple =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : Union[str, Any] =AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
snake_case__ : Union[str, Any] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , pad_token=a , sep_token=a , mask_token=a , cls_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
snake_case__ : Optional[int] =vocab_file
snake_case__ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a )
@property
def lowercase__ ( self ):
return self.sp_model.get_piece_size()
def lowercase__ ( self ):
snake_case__ : Optional[Any] ={self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
snake_case__ : List[Any] =self.__dict__.copy()
snake_case__ : Optional[int] =None
return state
def __setstate__( self , a ):
snake_case__ : Optional[Any] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case__ : Optional[Any] ={}
snake_case__ : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self , a ):
return self.sp_model.encode(a , out_type=a )
def lowercase__ ( self , a ):
return self.sp_model.piece_to_id(a )
def lowercase__ ( self , a ):
snake_case__ : Dict =self.sp_model.IdToPiece(a )
return token
def lowercase__ ( self , a ):
snake_case__ : Optional[int] =[]
snake_case__ : Dict =""""""
snake_case__ : Dict =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a ) + token
snake_case__ : Optional[Any] =True
snake_case__ : Dict =[]
else:
current_sub_tokens.append(a )
snake_case__ : List[Any] =False
out_string += self.sp_model.decode(a )
return out_string.strip()
def lowercase__ ( self , a , a = False , a = None , a = True , **a , ):
snake_case__ : List[str] =kwargs.pop("""use_source_tokenizer""" , a )
snake_case__ : int =self.convert_ids_to_tokens(a , skip_special_tokens=a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case__ : List[Any] =[]
snake_case__ : int =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a ) )
snake_case__ : List[Any] =[]
sub_texts.append(a )
else:
current_sub_text.append(a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case__ : Dict =re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(a ) )
else:
snake_case__ : Dict ="""""".join(a )
snake_case__ : str =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case__ : Any =self.clean_up_tokenization(a )
return clean_text
else:
return text
def lowercase__ ( self , a , a = None ):
if not os.path.isdir(a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case__ : Any =os.path.join(
a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , """wb""" ) as fi:
snake_case__ : Any =self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def lowercase__ ( self , a , a = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Union[str, Any] =[self.cls_token_id]
snake_case__ : Dict =[self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase__ ( self , a , a = None , a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def lowercase__ ( self , a , a = None ):
snake_case__ : List[str] =[self.sep_token_id]
snake_case__ : List[Any] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 448 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Optional[Any] ,_a : int = 128 ,_a : int = 256 ,_a : float = 2000.0 ,_a : int = 768 ,_a : int = 12 ,_a : int = 12 ,_a : int = 64 ,_a : int = 2048 ,_a : float = 0.1 ,):
'''simple docstring'''
super().__init__()
_a : int = nn.Sequential(
nn.Linear(_a ,d_model * 4 ,bias=_a ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=_a ) ,nn.SiLU() ,)
_a : Optional[int] = nn.Embedding(_a ,_a )
_a : List[Any] = False
_a : Union[str, Any] = nn.Linear(_a ,_a ,bias=_a )
_a : Dict = nn.Dropout(p=_a )
_a : Optional[int] = nn.ModuleList()
for lyr_num in range(_a ):
# FiLM conditional T5 decoder
_a : Optional[Any] = DecoderLayer(d_model=_a ,d_kv=_a ,num_heads=_a ,d_ff=_a ,dropout_rate=_a )
self.decoders.append(_a )
_a : Union[str, Any] = TaLayerNorm(_a )
_a : Optional[Any] = nn.Dropout(p=_a )
_a : Tuple = nn.Linear(_a ,_a ,bias=_a )
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
_a : str = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def __lowercase ( self : Any ,_a : Optional[Any] ,_a : List[str] ,_a : str ):
'''simple docstring'''
_a, _a, _a : Optional[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_a : Union[str, Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
_a : Optional[Any] = self.conditioning_emb(_a ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_a : Dict = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_a : List[str] = torch.broadcast_to(
torch.arange(_a ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
_a : List[str] = self.position_encoding(_a )
_a : str = self.continuous_inputs_projection(_a )
inputs += position_encodings
_a : str = self.dropout(_a )
# decoder: No padding present.
_a : List[Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_a : Optional[Any] = [(x, self.encoder_decoder_mask(_a ,_a )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_a : List[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
_a : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
_a : List[str] = lyr(
_a ,conditioning_emb=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)[0]
_a : int = self.decoder_norm(_a )
_a : List[str] = self.post_dropout(_a )
_a : Any = self.spec_out(_a )
return spec_out
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : str ,_a : Any ,_a : Any ,_a : str ,_a : Union[str, Any] ,_a : str ,_a : str=1E-6 ):
'''simple docstring'''
super().__init__()
_a : Any = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_a ,d_kv=_a ,num_heads=_a ,dropout_rate=_a ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_a ,d_kv=_a ,num_heads=_a ,dropout_rate=_a ,layer_norm_epsilon=_a ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_a ,d_ff=_a ,dropout_rate=_a ,layer_norm_epsilon=_a ) )
def __lowercase ( self : Dict ,_a : str ,_a : Union[str, Any]=None ,_a : Union[str, Any]=None ,_a : List[Any]=None ,_a : List[Any]=None ,_a : Union[str, Any]=None ,):
'''simple docstring'''
_a : Tuple = self.layer[0](
_a ,conditioning_emb=_a ,attention_mask=_a ,)
if encoder_hidden_states is not None:
_a : Union[str, Any] = torch.where(encoder_attention_mask > 0 ,0 ,-1E10 ).to(
encoder_hidden_states.dtype )
_a : List[str] = self.layer[1](
_a ,key_value_states=_a ,attention_mask=_a ,)
# Apply Film Conditional Feed Forward layer
_a : Tuple = self.layer[-1](_a ,_a )
return (hidden_states,)
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int ,_a : Optional[Any] ,_a : List[Any] ,_a : Any ,_a : int ):
'''simple docstring'''
super().__init__()
_a : Dict = TaLayerNorm(_a )
_a : int = TaFiLMLayer(in_features=d_model * 4 ,out_features=_a )
_a : int = Attention(query_dim=_a ,heads=_a ,dim_head=_a ,out_bias=_a ,scale_qk=_a )
_a : int = nn.Dropout(_a )
def __lowercase ( self : Optional[int] ,_a : Optional[Any] ,_a : List[Any]=None ,_a : List[Any]=None ,):
'''simple docstring'''
_a : List[str] = self.layer_norm(_a )
if conditioning_emb is not None:
_a : Any = self.FiLMLayer(_a ,_a )
# Self-attention block
_a : str = self.attention(_a )
_a : str = hidden_states + self.dropout(_a )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Optional[int] ,_a : List[str] ,_a : int ):
'''simple docstring'''
super().__init__()
_a : Optional[int] = Attention(query_dim=_a ,heads=_a ,dim_head=_a ,out_bias=_a ,scale_qk=_a )
_a : str = TaLayerNorm(_a ,eps=_a )
_a : int = nn.Dropout(_a )
def __lowercase ( self : Optional[Any] ,_a : List[str] ,_a : Optional[int]=None ,_a : List[str]=None ,):
'''simple docstring'''
_a : List[str] = self.layer_norm(_a )
_a : List[str] = self.attention(
_a ,encoder_hidden_states=_a ,attention_mask=attention_mask.squeeze(1 ) ,)
_a : Any = hidden_states + self.dropout(_a )
return layer_output
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] ,_a : int ,_a : Optional[Any] ,_a : Any ,_a : Tuple ):
'''simple docstring'''
super().__init__()
_a : Any = TaDenseGatedActDense(d_model=_a ,d_ff=_a ,dropout_rate=_a )
_a : Optional[Any] = TaFiLMLayer(in_features=d_model * 4 ,out_features=_a )
_a : Dict = TaLayerNorm(_a ,eps=_a )
_a : Dict = nn.Dropout(_a )
def __lowercase ( self : Optional[Any] ,_a : Optional[Any] ,_a : Optional[Any]=None ):
'''simple docstring'''
_a : str = self.layer_norm(_a )
if conditioning_emb is not None:
_a : str = self.film(_a ,_a )
_a : Optional[int] = self.DenseReluDense(_a )
_a : Dict = hidden_states + self.dropout(_a )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,_a : Any ,_a : Optional[int] ,_a : Union[str, Any] ):
'''simple docstring'''
super().__init__()
_a : Optional[Any] = nn.Linear(_a ,_a ,bias=_a )
_a : str = nn.Linear(_a ,_a ,bias=_a )
_a : Union[str, Any] = nn.Linear(_a ,_a ,bias=_a )
_a : Optional[int] = nn.Dropout(_a )
_a : Tuple = NewGELUActivation()
def __lowercase ( self : int ,_a : Dict ):
'''simple docstring'''
_a : Optional[Any] = self.act(self.wi_a(_a ) )
_a : Dict = self.wi_a(_a )
_a : List[str] = hidden_gelu * hidden_linear
_a : List[str] = self.dropout(_a )
_a : Dict = self.wo(_a )
return hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,_a : List[Any] ,_a : Any=1E-6 ):
'''simple docstring'''
super().__init__()
_a : List[str] = nn.Parameter(torch.ones(_a ) )
_a : Dict = eps
def __lowercase ( self : Optional[int] ,_a : str ):
'''simple docstring'''
_a : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=_a )
_a : Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_a : Optional[int] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ,_a : torch.Tensor ):
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_a ,3.0 )) ))
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
super().__init__()
_a : Optional[int] = nn.Linear(_a ,out_features * 2 ,bias=_a )
def __lowercase ( self : Union[str, Any] ,_a : Optional[int] ,_a : Dict ):
'''simple docstring'''
_a : List[Any] = self.scale_bias(_a )
_a, _a : Dict = torch.chunk(_a ,2 ,-1 )
_a : int = x * (1 + scale) + shift
return x
| 229 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = TypeVar("""DatasetType""", Dataset, IterableDataset)
def UpperCAmelCase_ (__a : List[DatasetType] , __a : Optional[List[float]] = None , __a : Optional[int] = None , __a : Optional[DatasetInfo] = None , __a : Optional[NamedSplit] = None , __a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(__a ):
if not isinstance(__a , (Dataset, IterableDataset) ):
if isinstance(__a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(__a )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}.""" )
if i == 0:
_a, _a : Tuple = (
(Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset)
)
elif not isinstance(__a , __a ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__a , __a , __a , info=__a , split=__a , stopping_strategy=__a )
else:
return _interleave_iterable_datasets(
__a , __a , __a , info=__a , split=__a , stopping_strategy=__a )
def UpperCAmelCase_ (__a : List[DatasetType] , __a : Optional[DatasetInfo] = None , __a : Optional[NamedSplit] = None , __a : int = 0 , ):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(__a ):
if not isinstance(__a , (Dataset, IterableDataset) ):
if isinstance(__a , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'is an empty dataset dictionary.' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(__a )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}.""" )
if i == 0:
_a, _a : Dict = (
(Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset)
)
elif not isinstance(__a , __a ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__a , info=__a , split=__a , axis=__a )
else:
return _concatenate_iterable_datasets(__a , info=__a , split=__a , axis=__a )
| 229 | 1 |
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def snake_case_ ( _lowerCAmelCase : Tuple ) -> int:
# A local function to see if a dot lands in the circle.
def is_in_circle(_lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ) -> bool:
UpperCAmelCase : Tuple = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase : str = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_SCREAMING_SNAKE_CASE ) )
# The ratio of the area for circle to square is pi/4.
UpperCAmelCase : Any = proportion * 4
print(f"""The estimated value of pi is {pi_estimate}""" )
print(f"""The numpy value of pi is {pi}""" )
print(f"""The total error is {abs(pi - pi_estimate )}""" )
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] = 0.0 , _lowerCAmelCase : List[Any] = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for _ in range(_SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple = 0.0 , _lowerCAmelCase : str = 1.0 ) -> None:
def identity_function(_lowerCAmelCase : Union[str, Any] ) -> float:
return x
UpperCAmelCase : Dict = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = (max_value * max_value - min_value * min_value) / 2
print('''******************''' )
print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {expected_value}""" )
print(f"""Total error is {abs(estimated_value - expected_value )}""" )
print('''******************''' )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> None:
def function_to_integrate(_lowerCAmelCase : List[str] ) -> float:
return sqrt(4.0 - x * x )
UpperCAmelCase : Optional[int] = area_under_curve_estimator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 0.0 , 2.0 )
print('''******************''' )
print('''Estimating pi using area_under_curve_estimator''' )
print(f"""Estimated value is {estimated_value}""" )
print(f"""Expected value is {pi}""" )
print(f"""Total error is {abs(estimated_value - pi )}""" )
print('''******************''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
UpperCamelCase__: Tuple = [8, 5, 9, 7]
UpperCamelCase__: int = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
UpperCamelCase__: List[str] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class SCREAMING_SNAKE_CASE:
"""simple docstring"""
def __init__( self : str , __snake_case : list[int] , __snake_case : list[list[int]] , __snake_case : list[list[int]] , ) -> None:
UpperCAmelCase : Union[str, Any] = claim_vector
UpperCAmelCase : Tuple = allocated_resources_table
UpperCAmelCase : List[str] = maximum_claim_table
def A ( self : Optional[Any] ) -> list[int]:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def A ( self : Union[str, Any] ) -> list[int]:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def A ( self : str ) -> list[list[int]]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__snake_case ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def A ( self : List[Any] ) -> dict[int, list[int]]:
return {self.__need().index(__snake_case ): i for i in self.__need()}
def A ( self : Tuple , **__snake_case : Union[str, Any] ) -> None:
UpperCAmelCase : Tuple = self.__need()
UpperCAmelCase : Union[str, Any] = self.__allocated_resources_table
UpperCAmelCase : int = self.__available_resources()
UpperCAmelCase : Tuple = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
UpperCAmelCase : Dict = False
for each_need in need_list:
UpperCAmelCase : Optional[int] = True
for index, need in enumerate(__snake_case ):
if need > available_resources[index]:
UpperCAmelCase : List[str] = False
break
if execution:
UpperCAmelCase : str = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCAmelCase : Union[str, Any] = original_need_index
print(F"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__snake_case )
# update available/freed resources stack
UpperCAmelCase : Optional[int] = np.array(__snake_case ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(__snake_case ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def A ( self : Dict ) -> Optional[Any]:
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(__snake_case ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(__snake_case ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(__snake_case ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(__snake_case ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :Tuple = 'microsoft/speecht5_tts'
UpperCamelCase_ :List[Any] = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
UpperCamelCase_ :str = 'text_reader'
UpperCamelCase_ :Any = SpeechTaProcessor
UpperCamelCase_ :Optional[Any] = SpeechTaForTextToSpeech
UpperCamelCase_ :Dict = SpeechTaHifiGan
UpperCamelCase_ :List[str] = ['text']
UpperCamelCase_ :Optional[Any] = ['audio']
def __snake_case ( self : List[Any] ):
if self.post_processor is None:
lowerCAmelCase__ = '''microsoft/speecht5_hifigan'''
super().setup()
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ):
lowerCAmelCase__ = self.pre_processor(text=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , truncation=SCREAMING_SNAKE_CASE_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('''Datasets needs to be installed if not passing speaker embeddings.''' )
lowerCAmelCase__ = load_dataset('''Matthijs/cmu-arctic-xvectors''' , split='''validation''' )
lowerCAmelCase__ = torch.tensor(embeddings_dataset[7_305]['''xvector'''] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ):
with torch.no_grad():
return self.model.generate_speech(**SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] ):
with torch.no_grad():
return self.post_processor(SCREAMING_SNAKE_CASE_ ).cpu().detach()
| 668 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase : str = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_UpperCAmelCase : List[str] = {
"ctrl": 256,
}
_UpperCAmelCase : int = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
lowerCAmelCase__ = set(lowercase__ )
return pairs
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :int = VOCAB_FILES_NAMES
UpperCamelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :Optional[int] = CONTROL_CODES
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , **SCREAMING_SNAKE_CASE_ : Tuple ):
super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCAmelCase__ = {}
@property
def __snake_case ( self : List[str] ):
return len(self.encoder )
def __snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Any ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCAmelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
return word
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ):
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase__ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
lowerCAmelCase__ = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase__ = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 668 | 1 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
SCREAMING_SNAKE_CASE = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
SCREAMING_SNAKE_CASE = features.copy()
SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ , split=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if issubclass(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = jsonl_path
elif issubclass(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = [jsonl_path]
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_dataset(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ):
'''simple docstring'''
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for split in splits:
SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE = JsonDatasetReader({"""train""": jsonl_path} , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if split:
SCREAMING_SNAKE_CASE = {split: jsonl_path}
else:
SCREAMING_SNAKE_CASE = """train"""
SCREAMING_SNAKE_CASE = {"""train""": jsonl_path, """test""": jsonl_path}
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCamelCase__ , cache_dir=UpperCamelCase__ ).read()
_check_json_datasetdict(UpperCamelCase__ , UpperCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return json.load(UpperCamelCase__ )
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return [json.loads(UpperCamelCase__ ) for line in buffer]
class UpperCAmelCase_ :
'''simple docstring'''
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _UpperCAmelCase ( self : List[str] , a : str , a : List[str] , a : str ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json_function(__a )
assert isinstance(__a , __a )
assert isinstance(exported_content[0] , __a )
assert len(__a ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _UpperCAmelCase ( self : Optional[int] , a : Tuple , a : Any , a : List[str] , a : Tuple , a : Dict ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , orient=__a ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json(__a )
assert isinstance(__a , __a )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__a , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__a ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _UpperCAmelCase ( self : Union[str, Any] , a : List[Any] , a : Any , a : Dict ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json_function(__a )
assert isinstance(__a , __a )
assert isinstance(exported_content[0] , __a )
assert len(__a ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _UpperCAmelCase ( self : str , a : Any , a : int , a : Tuple , a : List[Any] , a : Union[str, Any] ) -> Dict:
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , lines=__a , orient=__a , num_proc=2 ).write()
buffer.seek(0 )
SCREAMING_SNAKE_CASE = load_json(__a )
assert isinstance(__a , __a )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__a , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__a ) == 10
def _UpperCAmelCase ( self : Optional[int] , a : List[Any] ) -> List[str]:
with pytest.raises(__a ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__a , __a , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def _UpperCAmelCase ( self : Tuple , a : Tuple , a : Union[str, Any] , a : Union[str, Any] , a : Any , a : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
SCREAMING_SNAKE_CASE = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(__a , __a , compression=__a ).write()
with fsspec.open(__a , """rb""" , compression="""infer""" ) as f:
SCREAMING_SNAKE_CASE = f.read()
with fsspec.open(__a , """rb""" , compression="""infer""" ) as f:
SCREAMING_SNAKE_CASE = f.read()
assert exported_content == original_content
| 720 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( A ):
'''simple docstring'''
a__ = ['''image_processor''', '''tokenizer''']
a__ = '''ViTImageProcessor'''
a__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : List[str] , a : Dict=None , a : List[str]=None , **a : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , a , )
SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(a , a )
def __call__( self : List[Any] , a : int=None , a : Dict=None , a : int=None , a : Tuple=None , **a : Any ) -> Any:
if text is None and visual_prompt is None and images is None:
raise ValueError("""You have to specify either text, visual prompt or images.""" )
if text is not None and visual_prompt is not None:
raise ValueError("""You have to specify exactly one type of prompt. Either text or visual prompt.""" )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(a , return_tensors=a , **a )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE = self.image_processor(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(a , return_tensors=a , **a )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE = {
"""pixel_values""": image_features.pixel_values,
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE = {
"""conditional_pixel_values""": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def _UpperCAmelCase ( self : Dict , *a : Dict , **a : Union[str, Any] ) -> Any:
return self.tokenizer.batch_decode(*a , **a )
def _UpperCAmelCase ( self : Tuple , *a : List[Any] , **a : Union[str, Any] ) -> Dict:
return self.tokenizer.decode(*a , **a )
@property
def _UpperCAmelCase ( self : Any ) -> Optional[Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , a , )
return self.image_processor_class
@property
def _UpperCAmelCase ( self : Optional[int] ) -> str:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , a , )
return self.image_processor
| 450 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
a = logging.getLogger()
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """all_results.json""" )
if os.path.exists(__UpperCAmelCase ):
with open(__UpperCAmelCase , """r""" ) as f:
__SCREAMING_SNAKE_CASE = json.load(__UpperCAmelCase )
else:
raise ValueError(f"""can't find {path}""" )
return results
a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __a ( _snake_case ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
import xla_spawn
__SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(lowerCamelCase ,"""argv""" ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = time()
xla_spawn.main()
__SCREAMING_SNAKE_CASE = time()
__SCREAMING_SNAKE_CASE = get_results(lowerCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start ,500 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
import xla_spawn
__SCREAMING_SNAKE_CASE = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(lowerCamelCase ,"""argv""" ,lowerCamelCase ):
xla_spawn.main()
| 109 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : Optional[int] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class a__ ( __SCREAMING_SNAKE_CASE ):
_A = "markuplm"
def __init__( self : str , A_ : List[Any]=3_05_22 , A_ : Dict=7_68 , A_ : str=12 , A_ : Tuple=12 , A_ : Any=30_72 , A_ : Union[str, Any]="gelu" , A_ : int=0.1 , A_ : List[str]=0.1 , A_ : int=5_12 , A_ : Tuple=2 , A_ : int=0.02 , A_ : str=1e-12 , A_ : str=0 , A_ : List[Any]=0 , A_ : Union[str, Any]=2 , A_ : Tuple=2_56 , A_ : Tuple=10_24 , A_ : str=2_16 , A_ : List[Any]=10_01 , A_ : Optional[int]=32 , A_ : List[str]=50 , A_ : List[str]="absolute" , A_ : Any=True , A_ : str=None , **A_ : Optional[int] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ , )
lowerCamelCase_: Optional[int] = vocab_size
lowerCamelCase_: Any = hidden_size
lowerCamelCase_: Tuple = num_hidden_layers
lowerCamelCase_: Optional[Any] = num_attention_heads
lowerCamelCase_: Tuple = hidden_act
lowerCamelCase_: Dict = intermediate_size
lowerCamelCase_: Any = hidden_dropout_prob
lowerCamelCase_: Dict = attention_probs_dropout_prob
lowerCamelCase_: str = max_position_embeddings
lowerCamelCase_: int = type_vocab_size
lowerCamelCase_: Optional[int] = initializer_range
lowerCamelCase_: Union[str, Any] = layer_norm_eps
lowerCamelCase_: List[str] = position_embedding_type
lowerCamelCase_: List[Any] = use_cache
lowerCamelCase_: Dict = classifier_dropout
# additional properties
lowerCamelCase_: Dict = max_depth
lowerCamelCase_: Tuple = max_xpath_tag_unit_embeddings
lowerCamelCase_: int = max_xpath_subs_unit_embeddings
lowerCamelCase_: str = tag_pad_id
lowerCamelCase_: List[str] = subs_pad_id
lowerCamelCase_: Tuple = xpath_unit_hidden_size
| 423 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowercase ( a_ ):
_lowerCamelCase : Optional[float]= field(
default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
_lowerCamelCase : bool= field(default=a_, metadata={"help": "Whether to SortishSamler or not."} )
_lowerCamelCase : bool= field(
default=a_, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
_lowerCamelCase : bool= field(default=a_, metadata={"help": "whether to use adafactor"} )
_lowerCamelCase : Optional[float]= field(
default=a_, metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
_lowerCamelCase : Optional[float]= field(
default=a_, metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
_lowerCamelCase : Optional[float]= field(default=a_, metadata={"help": "Dropout probability. Goes into model.config."} )
_lowerCamelCase : Optional[float]= field(
default=a_, metadata={"help": "Attention dropout probability. Goes into model.config."} )
_lowerCamelCase : Optional[str]= field(
default="linear", metadata={"help": F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""}, )
| 471 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase ( a_, a_ ):
_lowerCamelCase : Optional[int]= "swin"
_lowerCamelCase : Dict= {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _snake_case=224 , _snake_case=4 , _snake_case=3 , _snake_case=96 , _snake_case=[2, 2, 6, 2] , _snake_case=[3, 6, 12, 24] , _snake_case=7 , _snake_case=4.0 , _snake_case=True , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case="gelu" , _snake_case=False , _snake_case=0.02 , _snake_case=1e-5 , _snake_case=32 , _snake_case=None , _snake_case=None , **_snake_case , ) -> Tuple:
super().__init__(**_snake_case)
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = patch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Tuple = embed_dim
UpperCAmelCase_ : Dict = depths
UpperCAmelCase_ : Dict = len(_snake_case)
UpperCAmelCase_ : List[Any] = num_heads
UpperCAmelCase_ : Optional[int] = window_size
UpperCAmelCase_ : Any = mlp_ratio
UpperCAmelCase_ : Any = qkv_bias
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = drop_path_rate
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Tuple = use_absolute_embeddings
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ : Union[str, Any] = int(embed_dim * 2 ** (len(_snake_case) - 1))
UpperCAmelCase_ : Tuple = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(_snake_case) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names)
class lowercase ( a_ ):
_lowerCamelCase : Tuple= version.parse("1.11" )
@property
def _snake_case ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def _snake_case ( self) -> float:
return 1e-4
| 471 | 1 |
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
return quad(snake_case__ , 0 , snake_case__ , args=(snake_case__) )[0]
def _snake_case ( snake_case__ : float , snake_case__ : float ):
return math.pow(snake_case__ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod() | 91 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 0 |
class UpperCAmelCase__ :
def __init__( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Dict = n
UpperCAmelCase_: Dict = [None] * self.n
UpperCAmelCase_: Tuple = 0 # index of the first element
UpperCAmelCase_: List[Any] = 0
UpperCAmelCase_: List[Any] = 0
def __len__( self ):
"""simple docstring"""
return self.size
def snake_case_ ( self ):
"""simple docstring"""
return self.size == 0
def snake_case_ ( self ):
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def snake_case_ ( self , A__ ):
"""simple docstring"""
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
UpperCAmelCase_: Tuple = data
UpperCAmelCase_: List[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def snake_case_ ( self ):
"""simple docstring"""
if self.size == 0:
raise Exception("UNDERFLOW" )
UpperCAmelCase_: Dict = self.array[self.front]
UpperCAmelCase_: Tuple = None
UpperCAmelCase_: int = (self.front + 1) % self.n
self.size -= 1
return temp | 707 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_lowerCAmelCase = """sshleifer/mar_enro_6_3_student"""
class UpperCAmelCase__ ( snake_case__ ):
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
UpperCAmelCase_: List[Any] = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=A__ , )
UpperCAmelCase_: Dict = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def snake_case_ ( self ):
"""simple docstring"""
MarianMTModel.from_pretrained(A__ )
@slow
@require_torch_gpu
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
UpperCAmelCase_: int = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
UpperCAmelCase_: List[Any] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
UpperCAmelCase_: Tuple = bash_script.replace(A__ , str(A__ ) )
UpperCAmelCase_: Tuple = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase_: Dict = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase_: Optional[int] = ["finetune.py"] + bash_script.split() + args
with patch.object(A__ , "argv" , A__ ):
UpperCAmelCase_: List[str] = argparse.ArgumentParser()
UpperCAmelCase_: Union[str, Any] = pl.Trainer.add_argparse_args(A__ )
UpperCAmelCase_: int = SummarizationModule.add_model_specific_args(A__ , os.getcwd() )
UpperCAmelCase_: Tuple = parser.parse_args()
UpperCAmelCase_: Any = main(A__ )
# Check metrics
UpperCAmelCase_: List[str] = load_json(model.metrics_save_path )
UpperCAmelCase_: Any = metrics["val"][0]
UpperCAmelCase_: List[str] = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , A__ )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase_: Optional[int] = os.listdir(A__ )
UpperCAmelCase_: Optional[int] = [x for x in contents if x.endswith(".ckpt" )][0]
UpperCAmelCase_: str = os.path.join(args.output_dir , A__ )
UpperCAmelCase_: Any = torch.load(A__ , map_location="cpu" )
UpperCAmelCase_: List[str] = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase_: Tuple = {os.path.basename(A__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class UpperCAmelCase__ ( snake_case__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = F"{self.test_file_dir_str}/test_data/wmt_en_ro"
UpperCAmelCase_: List[Any] = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
UpperCAmelCase_: Optional[int] = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
UpperCAmelCase_: Dict = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
UpperCAmelCase_: Union[str, Any] = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
UpperCAmelCase_: Dict = bash_script.replace(A__ , str(A__ ) )
UpperCAmelCase_: List[str] = self.get_auto_remove_tmp_dir()
UpperCAmelCase_: Union[str, Any] = bash_script.replace("--fp16" , "" )
UpperCAmelCase_: Optional[int] = 6
UpperCAmelCase_: Any = (
["distillation.py"]
+ bash_script.split()
+ [
F"--output_dir={output_dir}",
"--gpus=1",
"--learning_rate=1e-3",
F"--num_train_epochs={epochs}",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(A__ , "argv" , A__ ):
UpperCAmelCase_: int = argparse.ArgumentParser()
UpperCAmelCase_: Optional[int] = pl.Trainer.add_argparse_args(A__ )
UpperCAmelCase_: str = SummarizationDistiller.add_model_specific_args(A__ , os.getcwd() )
UpperCAmelCase_: Union[str, Any] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase_: str = distill_main(A__ )
# Check metrics
UpperCAmelCase_: int = load_json(model.metrics_save_path )
UpperCAmelCase_: Optional[int] = metrics["val"][0]
UpperCAmelCase_: str = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , A__ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase_: Dict = os.listdir(A__ )
UpperCAmelCase_: Dict = [x for x in contents if x.endswith(".ckpt" )][0]
UpperCAmelCase_: Union[str, Any] = os.path.join(args.output_dir , A__ )
UpperCAmelCase_: str = torch.load(A__ , map_location="cpu" )
UpperCAmelCase_: Dict = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase_: List[Any] = {os.path.basename(A__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1 | 306 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCAmelCase : Union[str, Any] = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
lowerCAmelCase : str = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
lowerCAmelCase : str = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : str):
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int = CHRF.CHAR_ORDER , lowerCAmelCase__ : int = CHRF.WORD_ORDER , lowerCAmelCase__ : int = CHRF.BETA , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , ):
SCREAMING_SNAKE_CASE_: int = len(references[0])
if any(len(lowerCAmelCase__) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
SCREAMING_SNAKE_CASE_: str = [[refs[i] for refs in references] for i in range(lowerCAmelCase__)]
SCREAMING_SNAKE_CASE_: Optional[Any] = CHRF(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = sb_chrf.corpus_score(lowerCAmelCase__ , lowerCAmelCase__)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 671 |
from math import asin, atan, cos, radians, sin, sqrt, tan
lowerCAmelCase : Union[str, Any] = 637_8137.0
lowerCAmelCase : int = 635_6752.31_4245
lowerCAmelCase : Union[str, Any] = 6378137
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = (AXIS_A - AXIS_B) / AXIS_A
SCREAMING_SNAKE_CASE_: str = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE_: Optional[int] = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE_: Any = radians(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = radians(_UpperCAmelCase )
# Equation
SCREAMING_SNAKE_CASE_: str = sin((phi_a - phi_a) / 2 )
SCREAMING_SNAKE_CASE_: List[Any] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
SCREAMING_SNAKE_CASE_: Tuple = sqrt(sin_sq_phi + (cos(_UpperCAmelCase ) * cos(_UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __snake_case ( _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[Any]):
UpperCamelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCamelCase = Image.open(requests.get(lowerCAmelCase_, stream=lowerCAmelCase_).raw).convert('''RGB''')
UpperCamelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3), (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1)),
])
UpperCamelCase = transform(lowerCAmelCase_).unsqueeze(0).to(lowerCAmelCase_)
return image
def __snake_case ( _UpperCAmelCase : int):
if "visual_encoder" in key:
UpperCamelCase = re.sub('''visual_encoder*''', '''vision_model.encoder''', lowerCAmelCase_)
if "blocks" in key:
UpperCamelCase = re.sub(R'''blocks''', '''layers''', lowerCAmelCase_)
if "attn" in key:
UpperCamelCase = re.sub(R'''attn''', '''self_attn''', lowerCAmelCase_)
if "norm1" in key:
UpperCamelCase = re.sub(R'''norm1''', '''layer_norm1''', lowerCAmelCase_)
if "norm2" in key:
UpperCamelCase = re.sub(R'''norm2''', '''layer_norm2''', lowerCAmelCase_)
if "encoder.norm" in key:
UpperCamelCase = re.sub(R'''encoder.norm''', '''post_layernorm''', lowerCAmelCase_)
if "encoder.patch_embed.proj" in key:
UpperCamelCase = re.sub(R'''encoder.patch_embed.proj''', '''embeddings.patch_embedding''', lowerCAmelCase_)
if "encoder.pos_embed" in key:
UpperCamelCase = re.sub(R'''encoder.pos_embed''', '''embeddings.position_embedding''', lowerCAmelCase_)
if "encoder.cls_token" in key:
UpperCamelCase = re.sub(R'''encoder.cls_token''', '''embeddings.class_embedding''', lowerCAmelCase_)
if "self_attn" in key:
UpperCamelCase = re.sub(R'''self_attn.proj''', '''self_attn.projection''', lowerCAmelCase_)
return key
@torch.no_grad()
def __snake_case ( _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any]=None):
if config_path is not None:
UpperCamelCase = BlipConfig.from_pretrained(lowerCAmelCase_)
else:
UpperCamelCase = BlipConfig(projection_dim=512, text_config={}, vision_config={})
UpperCamelCase = BlipForConditionalGeneration(lowerCAmelCase_).eval()
UpperCamelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCamelCase = blip_decoder(pretrained=lowerCAmelCase_, image_size=384, vit='''base''')
UpperCamelCase = pt_model.eval()
UpperCamelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(lowerCAmelCase_)
UpperCamelCase = rename_key(lowerCAmelCase_)
UpperCamelCase = value
hf_model.load_state_dict(lowerCAmelCase_)
UpperCamelCase = 384
UpperCamelCase = load_demo_image(image_size=lowerCAmelCase_, device='''cpu''')
UpperCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''')
UpperCamelCase = tokenizer(['''a picture of''']).input_ids
UpperCamelCase = hf_model.generate(lowerCAmelCase_, lowerCAmelCase_)
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
UpperCamelCase = hf_model.generate(lowerCAmelCase_)
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCAmelCase_)
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCamelCase = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCamelCase = blip_vqa(pretrained=lowerCAmelCase_, image_size=lowerCAmelCase_, vit='''base''')
vqa_model.eval()
UpperCamelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(lowerCAmelCase_)
UpperCamelCase = rename_key(lowerCAmelCase_)
UpperCamelCase = value
UpperCamelCase = BlipForQuestionAnswering(lowerCAmelCase_)
hf_vqa_model.load_state_dict(lowerCAmelCase_)
UpperCamelCase = ["How many dogs are in this image?"]
UpperCamelCase = tokenizer(lowerCAmelCase_, return_tensors='''pt''').input_ids
UpperCamelCase = hf_vqa_model.generate(lowerCAmelCase_, lowerCAmelCase_)
print(tokenizer.decode(answer[0]))
assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''')
UpperCamelCase = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCamelCase = blip_itm(pretrained=lowerCAmelCase_, image_size=lowerCAmelCase_, vit='''base''')
itm_model.eval()
UpperCamelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCamelCase = modified_state_dict.pop(lowerCAmelCase_)
UpperCamelCase = rename_key(lowerCAmelCase_)
UpperCamelCase = value
UpperCamelCase = BlipForImageTextRetrieval(lowerCAmelCase_)
UpperCamelCase = ["A picture of a woman with a dog sitting in a beach"]
UpperCamelCase = tokenizer(
lowerCAmelCase_, return_tensors='''pt''', padding='''max_length''', truncation=lowerCAmelCase_, max_length=35, ).input_ids
hf_itm_model.load_state_dict(lowerCAmelCase_)
hf_itm_model.eval()
UpperCamelCase = hf_itm_model(lowerCAmelCase_, lowerCAmelCase_, use_itm_head=lowerCAmelCase_)
UpperCamelCase = hf_itm_model(lowerCAmelCase_, lowerCAmelCase_, use_itm_head=lowerCAmelCase_)
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''')
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
snake_case_ : List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 707 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Tuple = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
snake_case_ : Union[str, Any] = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def __snake_case ( _UpperCAmelCase : List[str], _UpperCAmelCase : List[str]):
UpperCamelCase = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
UpperCamelCase = int(re.match(R'''.*layer_(\d*).*''', _UpperCAmelCase)[1])
layer_number -= 3
return f'h.{layer_number}.' + key
def __snake_case ( _UpperCAmelCase : str):
if dtype == torch.bool:
return 1 / 8
UpperCamelCase = re.search(R'''[^\d](\d+)$''', str(_UpperCAmelCase))
if bit_search is None:
raise ValueError(f'`dtype` is not a valid dtype: {dtype}.')
UpperCamelCase = int(bit_search.groups()[0])
return bit_size // 8
def __snake_case ( _UpperCAmelCase : int, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : str, _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[int]):
# Construct model
if bloom_config_file == "":
UpperCamelCase = BloomConfig()
else:
UpperCamelCase = BloomConfig.from_json_file(_UpperCAmelCase)
if shard_model:
UpperCamelCase = os.listdir(_UpperCAmelCase)
UpperCamelCase = sorted(filter(lambda _UpperCAmelCase: s.startswith('''layer''') and "model_00" in s, _UpperCAmelCase))
UpperCamelCase = {'''weight_map''': {}, '''metadata''': {}}
UpperCamelCase = 0
UpperCamelCase = None
UpperCamelCase = BloomConfig()
for j, file in enumerate(_UpperCAmelCase):
print('''Processing file: {}'''.format(_UpperCAmelCase))
UpperCamelCase = None
for i in range(_UpperCAmelCase):
# load all TP files
UpperCamelCase = file.replace('''model_00''', f'model_0{i}')
UpperCamelCase = torch.load(os.path.join(_UpperCAmelCase, _UpperCAmelCase), map_location='''cpu''')
# Rename keys in the transformers names
UpperCamelCase = list(temp.keys())
for key in keys:
UpperCamelCase = temp.pop(_UpperCAmelCase)
if tensors is None:
UpperCamelCase = temp
else:
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase = torch.cat([tensors[key], temp[key]], dim=_UpperCAmelCase)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
UpperCamelCase = tensors[key] / pretraining_tp
torch.save(
_UpperCAmelCase, os.path.join(
_UpperCAmelCase, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1).zfill(5), str(len(_UpperCAmelCase)).zfill(5)), ), )
for key in tensors.keys():
UpperCamelCase = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype)
if key not in index_dict["weight_map"]:
UpperCamelCase = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1).zfill(5), str(len(_UpperCAmelCase)).zfill(5))
UpperCamelCase = BloomConfig()
UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCamelCase = total_size
with open(_UpperCAmelCase, '''w''', encoding='''utf-8''') as f:
f.write(config.to_json_string())
with open(os.path.join(_UpperCAmelCase, WEIGHTS_NAME + '''.index.json'''), '''w''', encoding='''utf-8''') as f:
UpperCamelCase = json.dumps(_UpperCAmelCase, indent=2, sort_keys=_UpperCAmelCase) + '''\n'''
f.write(_UpperCAmelCase)
else:
UpperCamelCase = BloomModel(_UpperCAmelCase)
UpperCamelCase = os.listdir(_UpperCAmelCase)
UpperCamelCase = sorted(filter(lambda _UpperCAmelCase: s.startswith('''layer''') and "model_00" in s, _UpperCAmelCase))
UpperCamelCase = None
for i, file in enumerate(_UpperCAmelCase):
UpperCamelCase = None
for i in range(_UpperCAmelCase):
# load all TP files
UpperCamelCase = file.replace('''model_00''', f'model_0{i}')
UpperCamelCase = torch.load(os.path.join(_UpperCAmelCase, _UpperCAmelCase), map_location='''cpu''')
# Rename keys in the transformers names
UpperCamelCase = list(temp.keys())
for key in keys:
UpperCamelCase = temp.pop(_UpperCAmelCase)
if tensors is None:
UpperCamelCase = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_UpperCAmelCase) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase = torch.cat([tensors[key], temp[key]], dim=_UpperCAmelCase)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
UpperCamelCase = tensors[key] / pretraining_tp
UpperCamelCase = model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase)
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
UpperCamelCase = set(other_keys.missing_keys)
else:
UpperCamelCase = missing_keys.intersection(set(other_keys.missing_keys))
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(_UpperCAmelCase, exist_ok=_UpperCAmelCase)
UpperCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}')
if config.torch_dtype is not None:
UpperCamelCase = model.to(config.torch_dtype)
torch.save(model.state_dict(), _UpperCAmelCase)
print(f'Save configuration file to {pytorch_config_dump_path}')
with open(_UpperCAmelCase, '''w''', encoding='''utf-8''') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
snake_case_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
snake_case_ : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 350 | 0 |
# flake8: noqa
# Lint as: python3
lowercase_ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 74 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
"""simple docstring"""
lowerCAmelCase_ = 1_00_00
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
lowerCAmelCase_ = ParquetConfig
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
__SCREAMING_SNAKE_CASE : Tuple = data_files
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__SCREAMING_SNAKE_CASE : int = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_A ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
break
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) )
return splits
def UpperCAmelCase__ ( self : str , _A : pa.Table ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : Tuple , _A : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
with open(_A , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' )
raise
| 74 | 1 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCamelCase__ : int = pytest.mark.integration
UpperCamelCase__ : int = {'comet'}
UpperCamelCase__ : Tuple = importlib.util.find_spec('fairseq') is not None
UpperCamelCase__ : int = {'code_eval'}
UpperCamelCase__ : List[Any] = os.name == 'nt'
UpperCamelCase__ : int = {'bertscore', 'frugalscore', 'perplexity'}
UpperCamelCase__ : Union[str, Any] = importlib.util.find_spec('transformers') is not None
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
@wraps(a_ )
def wrapper(self , a_ ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("""\"test requires Fairseq\"""" )
else:
test_case(self , a_ )
return wrapper
def UpperCAmelCase ( a_ ) -> Any:
"""simple docstring"""
@wraps(a_ )
def wrapper(self , a_ ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("""\"test requires transformers\"""" )
else:
test_case(self , a_ )
return wrapper
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
@wraps(a_ )
def wrapper(self , a_ ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("""\"test not supported on Windows\"""" )
else:
test_case(self , a_ )
return wrapper
def UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
A_ : Tuple = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
__A, __A, __A )
@local
class _lowerCAmelCase ( parameterized.TestCase ):
"""simple docstring"""
lowerCamelCase = {}
lowerCamelCase = None
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" )
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
A_ : List[Any] = """[...]"""
A_ : List[str] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , _lowerCamelCase ) ).module_path )
A_ : int = datasets.load.import_main_class(metric_module.__name__ , dataset=_lowerCamelCase )
# check parameters
A_ : Tuple = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(_lowerCamelCase , metric_module.__name__ ):
with self.use_local_metrics():
try:
A_ : Union[str, Any] = doctest.testmod(_lowerCamelCase , verbose=_lowerCamelCase , raise_on_error=_lowerCamelCase )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[str]:
A_ : int = """[...]"""
A_ : Any = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("""metrics""" , _lowerCamelCase ) ).module_path )
# run doctest
with self.use_local_metrics():
A_ : Union[str, Any] = doctest.testmod(_lowerCamelCase , verbose=_lowerCamelCase , raise_on_error=_lowerCamelCase )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](_lowerCamelCase ):
yield
else:
yield
@contextmanager
def UpperCAmelCase_ ( self ) -> List[Any]:
def load_local_metric(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
return load_metric(os.path.join("""metrics""" , _lowerCamelCase ) , *_lowerCamelCase , **_lowerCamelCase )
with patch("""datasets.load_metric""" ) as mock_load_metric:
A_ : int = load_local_metric
yield
@classmethod
def UpperCAmelCase_ ( cls , _lowerCamelCase ) -> Any:
def wrapper(_lowerCamelCase ):
A_ : List[str] = contextmanager(_lowerCamelCase )
A_ : List[Any] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def UpperCAmelCase ( a_ ) -> int:
"""simple docstring"""
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class _lowerCAmelCase ( __A ):
"""simple docstring"""
def UpperCAmelCase_ ( self , _lowerCamelCase ) -> List[Any]:
assert len(input_dict["""input_ids"""] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
A_ : Any = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
import torch
def bert_cos_score_idf(a_ , a_ , *a_ , **a_ ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(a_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
A_ : List[Any] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
def load_from_checkpoint(a_ ):
class _lowerCAmelCase :
"""simple docstring"""
def UpperCAmelCase_ ( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) -> Any:
assert len(_lowerCamelCase ) == 2
A_ : Any = [0.19, 0.92]
return scores, sum(_lowerCamelCase ) / len(_lowerCamelCase )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
A_ : int = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
A_ : Optional[Any] = load_from_checkpoint
yield
def UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
A_ : Optional[int] = load_metric(os.path.join("""metrics""" , """seqeval""" ) )
A_ : Union[str, Any] = """ERROR"""
A_ : List[Any] = F"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(a_ , match=re.escape(a_ ) ):
metric.compute(predictions=[] , references=[] , scheme=a_ )
| 700 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ : str = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase__ : Union[str, Any] = {
'yjernite/retribert-base-uncased': 512,
}
UpperCamelCase__ : Dict = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class _lowerCAmelCase ( __A ):
"""simple docstring"""
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase = RetriBertTokenizer
lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase="[UNK]" , _lowerCamelCase="[SEP]" , _lowerCamelCase="[PAD]" , _lowerCamelCase="[CLS]" , _lowerCamelCase="[MASK]" , _lowerCamelCase=True , _lowerCamelCase=None , **_lowerCamelCase , ) -> Tuple:
super().__init__(
_lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , )
A_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _lowerCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _lowerCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _lowerCamelCase ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_lowerCamelCase , normalizer_state.pop("""type""" ) )
A_ : List[str] = do_lower_case
A_ : List[Any] = strip_accents
A_ : Optional[int] = tokenize_chinese_chars
A_ : int = normalizer_class(**_lowerCamelCase )
A_ : Tuple = do_lower_case
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=None ) -> Any:
A_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
A_ : Optional[int] = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
A_ : Dict = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
| 385 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"tokenizer_file": "tokenizer.json"}
lowerCamelCase_ = {
"tokenizer_file": {
"bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
},
}
class __a ( __lowerCamelCase ):
"""simple docstring"""
_A : Tuple = VOCAB_FILES_NAMES
_A : Dict = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = ["input_ids", "attention_mask"]
_A : List[str] = None
def __init__( self : Dict ,_UpperCamelCase : List[Any]=None ,_UpperCamelCase : Any=None ,_UpperCamelCase : Dict=None ,_UpperCamelCase : List[Any]="<unk>" ,_UpperCamelCase : List[str]="<s>" ,_UpperCamelCase : str="</s>" ,_UpperCamelCase : Any="<pad>" ,_UpperCamelCase : List[Any]=False ,_UpperCamelCase : Any=False ,**_UpperCamelCase : List[str] ,) -> int:
'''simple docstring'''
super().__init__(
_UpperCamelCase ,_UpperCamelCase ,tokenizer_file=_UpperCamelCase ,unk_token=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase ,**_UpperCamelCase ,)
SCREAMING_SNAKE_CASE__ =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE__ =getattr(_UpperCamelCase ,pre_tok_state.pop("""type""" ) )
SCREAMING_SNAKE_CASE__ =add_prefix_space
SCREAMING_SNAKE_CASE__ =pre_tok_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =add_prefix_space
def __A ( self : Optional[Any] ,*_UpperCamelCase : List[Any] ,**_UpperCamelCase : List[str] ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =kwargs.get("""is_split_into_words""" ,_UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*_UpperCamelCase ,**_UpperCamelCase )
def __A ( self : str ,*_UpperCamelCase : Any ,**_UpperCamelCase : Dict ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =kwargs.get("""is_split_into_words""" ,_UpperCamelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*_UpperCamelCase ,**_UpperCamelCase )
def __A ( self : Dict ,_UpperCamelCase : str ,_UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =self._tokenizer.model.save(_UpperCamelCase ,name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def __A ( self : Dict ,_UpperCamelCase : "Conversation" ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ =input_ids[-self.model_max_length :]
return input_ids
| 151 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =torch.load(__UpperCamelCase, map_location="""cpu""" )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__ =torch.load(__UpperCamelCase, map_location="""cpu""" )["""model"""]
# pop unnecessary weights
SCREAMING_SNAKE_CASE__ =[
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ ={
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__ =sd.pop(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__ =sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__ =key.replace(""".qkv_proj.""", """.q_proj.""" )
SCREAMING_SNAKE_CASE__ =key.replace(""".qkv_proj.""", """.k_proj.""" )
SCREAMING_SNAKE_CASE__ =key.replace(""".qkv_proj.""", """.v_proj.""" )
SCREAMING_SNAKE_CASE__ =value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =torch.split(__UpperCamelCase, depth // 3, dim=0 )
SCREAMING_SNAKE_CASE__ =q
SCREAMING_SNAKE_CASE__ =k
SCREAMING_SNAKE_CASE__ =v
del sd[key]
return sd
@torch.no_grad()
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase=None ):
SCREAMING_SNAKE_CASE__ =load_checkpoint(__UpperCamelCase )
if config is not None:
SCREAMING_SNAKE_CASE__ =OPTConfig.from_pretrained(__UpperCamelCase )
else:
SCREAMING_SNAKE_CASE__ =OPTConfig()
SCREAMING_SNAKE_CASE__ =OPTModel(__UpperCamelCase ).half().eval()
model.load_state_dict(__UpperCamelCase )
# Check results
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
lowerCamelCase_ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 151 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
a__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
a__ = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
a__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
a__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
a__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
a__ = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
("""image-to-text""", """MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def lowercase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
_snake_case : Optional[Any] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , SCREAMING_SNAKE_CASE__ )
return [m.group(0 ) for m in matches]
def lowercase ( ) -> Any:
_snake_case : Dict = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_snake_case : Optional[int] = {
config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_snake_case : List[Any] = collections.defaultdict(SCREAMING_SNAKE_CASE__ )
_snake_case : str = collections.defaultdict(SCREAMING_SNAKE_CASE__ )
_snake_case : List[str] = collections.defaultdict(SCREAMING_SNAKE_CASE__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(SCREAMING_SNAKE_CASE__ ):
_snake_case : List[str] = None
if _re_tf_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
_snake_case : Tuple = tf_models
_snake_case : str = _re_tf_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
_snake_case : Optional[Any] = flax_models
_snake_case : List[str] = _re_flax_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE__ ) is not None:
_snake_case : List[Any] = pt_models
_snake_case : List[Any] = _re_pt_models.match(SCREAMING_SNAKE_CASE__ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE__ ) > 0:
if attr_name in model_prefix_to_model_type:
_snake_case : Tuple = True
break
# Try again after removing the last word in the name
_snake_case : Tuple = """""".join(camel_case_split(SCREAMING_SNAKE_CASE__ )[:-1] )
_snake_case : str = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_snake_case : Dict = list(SCREAMING_SNAKE_CASE__ )
all_models.sort()
_snake_case : Union[str, Any] = {"""model_type""": all_models}
_snake_case : Optional[Any] = [pt_models[t] for t in all_models]
_snake_case : List[Any] = [tf_models[t] for t in all_models]
_snake_case : Union[str, Any] = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_snake_case : int = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_snake_case : Tuple = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_snake_case : List[Any] = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_snake_case : Any = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_snake_case : int = """AutoTokenizer"""
_snake_case : Optional[int] = [processors[t] for t in all_models]
return pd.DataFrame(SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
_snake_case : Tuple = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_snake_case : str = [model_mapping, F'''TF_{model_mapping}''', F'''FLAX_{model_mapping}''']
_snake_case : Union[str, Any] = [auto_class, F'''TF_{auto_class}''', F'''Flax_{auto_class}''']
# Loop through all three frameworks
for module, cls, mapping in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
# The type of pipeline may not exist in this framework
if not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
continue
# First extract all model_names
_snake_case : Dict = []
for name in getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).values():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
model_names.append(SCREAMING_SNAKE_CASE__ )
else:
model_names.extend(list(SCREAMING_SNAKE_CASE__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowercase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> int:
_snake_case : Tuple = get_frameworks_table()
_snake_case : Union[str, Any] = Dataset.from_pandas(SCREAMING_SNAKE_CASE__ )
_snake_case : str = hf_hub_download(
"""huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=SCREAMING_SNAKE_CASE__ )
_snake_case : List[Any] = Dataset.from_json(SCREAMING_SNAKE_CASE__ )
_snake_case : Optional[int] = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(SCREAMING_SNAKE_CASE__ ) )
}
_snake_case : List[str] = update_pipeline_and_auto_class_table(SCREAMING_SNAKE_CASE__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_snake_case : Any = sorted(table.keys() )
_snake_case : Union[str, Any] = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
_snake_case : Any = Dataset.from_pandas(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE__ , """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(SCREAMING_SNAKE_CASE__ , """pipeline_tags.json""" ) )
if commit_sha is not None:
_snake_case : str = (
F'''Update with commit {commit_sha}\n\nSee: '''
F'''https://github.com/huggingface/transformers/commit/{commit_sha}'''
)
else:
_snake_case : Any = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""" , folder_path=SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" , token=SCREAMING_SNAKE_CASE__ , commit_message=SCREAMING_SNAKE_CASE__ , )
def lowercase ( ) -> Optional[int]:
_snake_case : List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_snake_case : int = transformers_module.pipelines.SUPPORTED_TASKS
_snake_case : Dict = []
for key in pipeline_tasks:
if key not in in_table:
_snake_case : Union[str, Any] = pipeline_tasks[key]["""pt"""]
if isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
_snake_case : str = model[0]
_snake_case : Any = model.__name__
if model not in in_table.values():
missing.append(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
_snake_case : Optional[Any] = """, """.join(SCREAMING_SNAKE_CASE__ )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
F'''`utils/update_metadata.py`: {msg}. Please add them!''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument("""--token""", type=str, help="""The token to use to push to the transformers-metadata dataset.""")
parser.add_argument("""--commit_sha""", type=str, help="""The sha of the commit going with this update.""")
parser.add_argument("""--check-only""", action="""store_true""", help="""Activate to just check all pipelines are present.""")
a__ = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 198 |
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a__ = """src/diffusers"""
# Matches is_xxx_available()
a__ = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a__ = """
{0} = None
"""
a__ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a__ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def lowercase ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
_snake_case : str = _re_backend.findall(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE__ )
def lowercase ( ) -> Optional[Any]:
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_snake_case : Dict = f.readlines()
# Get to the point we do the actual imports for type checking
_snake_case : Optional[int] = 0
_snake_case : Union[str, Any] = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE__ ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
_snake_case : int = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
_snake_case : str = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE__ ) and len(lines[line_index] ) > 1:
_snake_case : int = lines[line_index]
_snake_case : str = _re_single_line_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(SCREAMING_SNAKE_CASE__ ) > 0:
_snake_case : Optional[int] = objects
else:
line_index += 1
return backend_specific_objects
def lowercase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict:
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE__ )
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase ( SCREAMING_SNAKE_CASE__ : Dict=None ) -> List[Any]:
if backend_specific_objects is None:
_snake_case : Dict = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
_snake_case : Union[str, Any] = {}
for backend, objects in backend_specific_objects.items():
_snake_case : List[Any] = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
_snake_case : Any = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for o in objects] )
_snake_case : Any = dummy_file
return dummy_files
def lowercase ( SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Optional[int]:
_snake_case : str = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
_snake_case : List[Any] = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
_snake_case : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , """utils""" )
_snake_case : int = {
backend: os.path.join(SCREAMING_SNAKE_CASE__ , F'''dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py''' )
for backend in dummy_files.keys()
}
_snake_case : int = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_snake_case : Union[str, Any] = f.read()
else:
_snake_case : str = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a__ = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 198 | 1 |
from __future__ import annotations
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = sorted(numsa + numsa )
__lowercase , __lowercase = divmod(len(lowerCamelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : Optional[int] = [float(x) for x in input("""Enter the elements of first array: """).split()]
__UpperCamelCase : Optional[int] = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 80 |
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = [[] for _ in range(lowerCamelCase )]
__lowercase = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(lowerCamelCase ) <= key:
return input_string
for position, character in enumerate(lowerCamelCase ):
__lowercase = position % (lowest * 2) # puts it in bounds
__lowercase = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCamelCase )
__lowercase = ["""""".join(lowerCamelCase ) for row in temp_grid]
__lowercase = """""".join(lowerCamelCase )
return output_string
def snake_case ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
__lowercase = []
__lowercase = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
__lowercase = [[] for _ in range(lowerCamelCase )] # generates template
for position in range(len(lowerCamelCase ) ):
__lowercase = position % (lowest * 2) # puts it in bounds
__lowercase = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
__lowercase = 0
for row in temp_grid: # fills in the characters
__lowercase = input_string[counter : counter + len(lowerCamelCase )]
grid.append(list(lowerCamelCase ) )
counter += len(lowerCamelCase )
__lowercase = """""" # reads as zigzag
for position in range(len(lowerCamelCase ) ):
__lowercase = position % (lowest * 2) # puts it in bounds
__lowercase = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def snake_case ( lowerCamelCase ):
'''simple docstring'''
__lowercase = {}
for key_guess in range(1 , len(lowerCamelCase ) ): # tries every key
__lowercase = decrypt(lowerCamelCase , lowerCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
'''simple docstring'''
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
lowercase_ : Union[str, Any] = {
'7z': (seven_zip_file, SevenZipExtractor),
'bz2': (bza_file, BzipaExtractor),
'gzip': (gz_file, GzipExtractor),
'lz4': (lza_file, LzaExtractor),
'tar': (tar_file, TarExtractor),
'xz': (xz_file, XzExtractor),
'zip': (zip_file, ZipExtractor),
'zstd': (zstd_file, ZstdExtractor),
}
lowercase_ ,lowercase_ : Any = input_paths_and_base_extractors[compression_format]
if input_path is None:
lowercase_ : Dict = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(a__ )
assert base_extractor.is_extractable(a__ )
lowercase_ : str = tmp_path / ('extracted' if is_archive else 'extracted.txt')
base_extractor.extract(a__ , a__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ : Tuple = file_path.read_text(encoding='utf-8' )
else:
lowercase_ : Union[str, Any] = output_path.read_text(encoding='utf-8' )
lowercase_ : Optional[int] = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
lowercase_ : int = {
'7z': seven_zip_file,
'bz2': bza_file,
'gzip': gz_file,
'lz4': lza_file,
'tar': tar_file,
'xz': xz_file,
'zip': zip_file,
'zstd': zstd_file,
}
lowercase_ : Any = input_paths[compression_format]
if input_path is None:
lowercase_ : Any = f'''for \'{compression_format}\' compression_format, '''
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(a__ )
lowercase_ : Dict = Extractor.infer_extractor_format(a__ )
assert extractor_format is not None
lowercase_ : Tuple = tmp_path / ('extracted' if is_archive else 'extracted.txt')
Extractor.extract(a__ , a__ , a__ )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
lowercase_ : Any = file_path.read_text(encoding='utf-8' )
else:
lowercase_ : Optional[int] = output_path.read_text(encoding='utf-8' )
lowercase_ : int = text_file.read_text(encoding='utf-8' )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
import tarfile
lowercase_ : Tuple = tmp_path / 'data_dot_dot'
directory.mkdir()
lowercase_ : List[str] = directory / 'tar_file_with_dot_dot.tar'
with tarfile.TarFile(a__ , 'w' ) as f:
f.add(a__ , arcname=os.path.join('..' , text_file.name ) )
return path
@pytest.fixture
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
import tarfile
lowercase_ : int = tmp_path / 'data_sym_link'
directory.mkdir()
lowercase_ : Any = directory / 'tar_file_with_sym_link.tar'
os.symlink('..' , directory / 'subdir' , target_is_directory=a__ )
with tarfile.TarFile(a__ , 'w' ) as f:
f.add(str(directory / 'subdir' ) , arcname='subdir' ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
'insecure_tar_file, error_log' , [('tar_file_with_dot_dot', 'illegal path'), ('tar_file_with_sym_link', 'Symlink')] , )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase_ : Union[str, Any] = {
'tar_file_with_dot_dot': tar_file_with_dot_dot,
'tar_file_with_sym_link': tar_file_with_sym_link,
}
lowercase_ : Union[str, Any] = insecure_tar_files[insecure_tar_file]
lowercase_ : Optional[int] = tmp_path / 'extracted'
TarExtractor.extract(a__ , a__ )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = tmpdir / 'not_a_zip_file'
# From: https://github.com/python/cpython/pull/5053
lowercase_ : Optional[Any] = (
B'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'
B'\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'
B'DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'
B'\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'
)
with not_a_zip_file.open('wb' ) as f:
f.write(a__ )
assert zipfile.is_zipfile(str(a__ ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(a__ ) # but we're right
| 716 | '''simple docstring'''
import os
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[Any] = len(grid[0] )
lowercase_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
lowercase_ : Union[str, Any] = 0
lowercase_ : int = 0
lowercase_ : Union[str, Any] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(n_rows - 3 ):
lowercase_ : Dict = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
lowercase_ : Optional[Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
lowercase_ : Tuple = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
lowercase_ : int = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
lowercase_ : Optional[int] = max(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if max_product > largest:
lowercase_ : List[Any] = max_product
return largest
def _UpperCamelCase ( ):
lowercase_ : int = []
with open(os.path.dirname(SCREAMING_SNAKE_CASE_ ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
lowercase_ : Dict = [[int(SCREAMING_SNAKE_CASE_ ) for i in grid[j]] for j in range(len(SCREAMING_SNAKE_CASE_ ) )]
return largest_product(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(solution())
| 438 | 0 |
def _UpperCamelCase ( lowerCAmelCase_ ) ->str:
UpperCAmelCase = 0
# if input_string is "aba" than new_input_string become "a|b|a"
UpperCAmelCase = """"""
UpperCAmelCase = """"""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(lowerCAmelCase_ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
UpperCAmelCase , UpperCAmelCase = 0, 0
# length[i] shows the length of palindromic substring with center i
UpperCAmelCase = [1 for i in range(len(lowerCAmelCase_ ) )]
# for each character in new_string find corresponding palindromic string
UpperCAmelCase = 0
for j in range(len(lowerCAmelCase_ ) ):
UpperCAmelCase = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(lowerCAmelCase_ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
UpperCAmelCase = 2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
UpperCAmelCase = j - k + 1 # noqa: E741
UpperCAmelCase = j + k - 1
# update max_length and start position
if max_length < length[j]:
UpperCAmelCase = length[j]
UpperCAmelCase = j
# create that string
UpperCAmelCase = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 377 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_E_0_0 and cp <= 0x9_F_F_F)
or (cp >= 0x3_4_0_0 and cp <= 0x4_D_B_F) #
or (cp >= 0x2_0_0_0_0 and cp <= 0x2_A_6_D_F) #
or (cp >= 0x2_A_7_0_0 and cp <= 0x2_B_7_3_F) #
or (cp >= 0x2_B_7_4_0 and cp <= 0x2_B_8_1_F) #
or (cp >= 0x2_B_8_2_0 and cp <= 0x2_C_E_A_F) #
or (cp >= 0xF_9_0_0 and cp <= 0xF_A_F_F)
or (cp >= 0x2_F_8_0_0 and cp <= 0x2_F_A_1_F) #
): #
return True
return False
def _UpperCamelCase ( lowerCAmelCase_ ) ->List[str]:
# word like '180' or '身高' or '神'
for char in word:
UpperCAmelCase = ord(lowerCAmelCase_ )
if not _is_chinese_char(lowerCAmelCase_ ):
return 0
return 1
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[int]:
UpperCAmelCase = set()
for token in tokens:
UpperCAmelCase = len(lowerCAmelCase_ ) > 1 and is_chinese(lowerCAmelCase_ )
if chinese_word:
word_set.add(lowerCAmelCase_ )
UpperCAmelCase = list(lowerCAmelCase_ )
return word_list
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Union[str, Any]:
if not chinese_word_set:
return bert_tokens
UpperCAmelCase = max([len(lowerCAmelCase_ ) for w in chinese_word_set] )
UpperCAmelCase = bert_tokens
UpperCAmelCase , UpperCAmelCase = 0, len(lowerCAmelCase_ )
while start < end:
UpperCAmelCase = True
if is_chinese(bert_word[start] ):
UpperCAmelCase = min(end - start , lowerCAmelCase_ )
for i in range(lowerCAmelCase_ , 1 , -1 ):
UpperCAmelCase = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCAmelCase = """##""" + bert_word[j]
UpperCAmelCase = start + i
UpperCAmelCase = False
break
if single_word:
start += 1
return bert_word
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->Dict:
UpperCAmelCase = []
for i in range(0 , len(lowerCAmelCase_ ) , 1_0_0 ):
UpperCAmelCase = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["""cws"""] ).cws
UpperCAmelCase = [get_chinese_word(lowerCAmelCase_ ) for r in res]
ltp_res.extend(lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
UpperCAmelCase = []
for i in range(0 , len(lowerCAmelCase_ ) , 1_0_0 ):
UpperCAmelCase = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=5_1_2 )
bert_res.extend(res["""input_ids"""] )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
UpperCAmelCase = []
for input_ids, chinese_word in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase = []
for id in input_ids:
UpperCAmelCase = bert_tokenizer._convert_id_to_token(lowerCAmelCase_ )
input_tokens.append(lowerCAmelCase_ )
UpperCAmelCase = add_sub_symbol(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCAmelCase_ ):
if token[:2] == "##":
UpperCAmelCase = token[2:]
# save chinese tokens' pos
if len(lowerCAmelCase_ ) == 1 and _is_chinese_char(ord(lowerCAmelCase_ ) ):
ref_id.append(lowerCAmelCase_ )
ref_ids.append(lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
return ref_ids
def _UpperCamelCase ( lowerCAmelCase_ ) ->int:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
UpperCAmelCase = f.readlines()
UpperCAmelCase = [line.strip() for line in data if len(lowerCAmelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCAmelCase = LTP(args.ltp ) # faster in GPU device
UpperCAmelCase = BertTokenizer.from_pretrained(args.bert )
UpperCAmelCase = prepare_ref(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
UpperCAmelCase = [json.dumps(lowerCAmelCase_ ) + """\n""" for ref in ref_ids]
f.writelines(lowerCAmelCase_ )
if __name__ == "__main__":
__a = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
__a = parser.parse_args()
main(args)
| 377 | 1 |
import argparse
_a = "docs/source/_static/js/custom.js"
def lowerCAmelCase__(__snake_case ) -> Tuple:
'''simple docstring'''
with open(__snake_case ,encoding='''utf-8''' ,newline='''\n''' ) as f:
lowerCamelCase__ = f.readlines()
lowerCamelCase__ = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
lowerCamelCase__ = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(__snake_case ,'''w''' ,encoding='''utf-8''' ,newline='''\n''' ) as f:
f.writelines(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_a = parser.parse_args()
update_custom_js(args.version)
| 29 |
from __future__ import annotations
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> float:
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,) -> float:
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
__snake_case ,nominal_annual_percentage_rate / 365 ,number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
return 1 / (1 + np.exp(-z ))
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Tuple ):
return (-y * np.log(UpperCamelCase__ ) - (1 - y) * np.log(1 - h )).mean()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple ):
SCREAMING_SNAKE_CASE__ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
return np.sum(y * scores - np.log(1 + np.exp(UpperCamelCase__ ) ) )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Any , UpperCamelCase__: Optional[Any]=70_000 ):
SCREAMING_SNAKE_CASE__ = np.zeros(x.shape[1] )
for iterations in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = sigmoid_function(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = sigmoid_function(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = cost_function(UpperCamelCase__ , UpperCamelCase__ )
if iterations % 100 == 0:
print(f'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
_lowerCamelCase = datasets.load_iris()
_lowerCamelCase = iris.data[:, :2]
_lowerCamelCase = (iris.target != 0) * 1
_lowerCamelCase = 0.1
_lowerCamelCase = logistic_reg(alpha, x, y, max_iterations=70000)
print('theta: ', theta) # printing the theta i.e our weights vector
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
return sigmoid_function(
np.dot(UpperCamelCase__ , UpperCamelCase__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((_lowerCamelCase) , (_lowerCamelCase)) = (x[:, 0].min(), x[:, 0].max())
((_lowerCamelCase) , (_lowerCamelCase)) = (x[:, 1].min(), x[:, 1].max())
((_lowerCamelCase) , (_lowerCamelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCamelCase = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCamelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show() | 6 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
try:
SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
while i * i <= n:
while n % i == 0:
SCREAMING_SNAKE_CASE__ = i
n //= i
i += 1
if n > 1:
SCREAMING_SNAKE_CASE__ = n
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''') | 6 | 1 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: list ):
for i in range(len(UpperCamelCase__ ) - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ = False
for j in range(UpperCamelCase__ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = unsorted[j - 1], unsorted[j]
SCREAMING_SNAKE_CASE__ = True
for j in range(UpperCamelCase__ ):
if unsorted[j] > unsorted[j + 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = unsorted[j + 1], unsorted[j]
SCREAMING_SNAKE_CASE__ = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 706 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str ):
def get_masked_lm_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_layer_array(UpperCamelCase__: int , UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_attention_layer_array(UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = array.reshape(UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
print(f'''Loading model based on config from {config_path}...''' )
SCREAMING_SNAKE_CASE__ = BertConfig.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = BertForMaskedLM(UpperCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ = model.bert.encoder.layer[layer_index]
# Self-attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/bias""" , self_attn.query.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/bias""" , self_attn.key.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
SCREAMING_SNAKE_CASE__ = layer.attention.output
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/bias""" , self_output.dense.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/beta""" )
# Intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/bias""" )
# Output
SCREAMING_SNAKE_CASE__ = layer.output
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/beta""" )
# Embeddings
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
SCREAMING_SNAKE_CASE__ = model.cls.predictions.transform
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/beta""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""embedding_table""" )
# Pooling
SCREAMING_SNAKE_CASE__ = BertPooler(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase__ )
# Integration test - should load without any errors ;)
SCREAMING_SNAKE_CASE__ = BertForMaskedLM.from_pretrained(UpperCamelCase__ )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowerCamelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 59 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''blip_2_vision_model'''
def __init__(self , SCREAMING_SNAKE_CASE_=14_08 , SCREAMING_SNAKE_CASE_=61_44 , SCREAMING_SNAKE_CASE_=39 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0_00_01 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1e-1_0 , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = patch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = qkv_bias
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
SCREAMING_SNAKE_CASE_ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''blip_2_qformer'''
def __init__(self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-1_2 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=14_08 , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = position_embedding_type
SCREAMING_SNAKE_CASE_ = cross_attention_frequency
SCREAMING_SNAKE_CASE_ = encoder_hidden_size
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
SCREAMING_SNAKE_CASE_ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class snake_case ( __lowercase ):
UpperCAmelCase__ = '''blip-2'''
UpperCAmelCase__ = True
def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=32 , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
if vision_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
SCREAMING_SNAKE_CASE_ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
SCREAMING_SNAKE_CASE_ = BlipaVisionConfig(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = BlipaQFormerConfig(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.text_config.tie_word_embeddings
SCREAMING_SNAKE_CASE_ = self.text_config.is_encoder_decoder
SCREAMING_SNAKE_CASE_ = num_query_tokens
SCREAMING_SNAKE_CASE_ = self.vision_config.hidden_size
SCREAMING_SNAKE_CASE_ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
SCREAMING_SNAKE_CASE_ = 1.0
SCREAMING_SNAKE_CASE_ = 0.02
@classmethod
def _lowercase (cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE_ , )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.qformer_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.text_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
return output | 626 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case ( __lowercase ):
UpperCAmelCase__ = (UnCLIPScheduler,)
def _lowercase (self , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
'''num_train_timesteps''': 10_00,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def _lowercase (self ):
"""simple docstring"""
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ , prev_timestep=SCREAMING_SNAKE_CASE_ )
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_54_96_25 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_99_49_87 ) ) < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = 0.5
assert scheduler._get_variance(1 , predicted_variance=SCREAMING_SNAKE_CASE_ ) - -10.1_71_27_90 < 1e-5
assert scheduler._get_variance(4_87 , predicted_variance=SCREAMING_SNAKE_CASE_ ) - -5.7_99_80_52 < 1e-5
assert scheduler._get_variance(9_99 , predicted_variance=SCREAMING_SNAKE_CASE_ ) - -0.0_01_00_11 < 1e-5
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
SCREAMING_SNAKE_CASE_ = pred_prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1e-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE_ = scheduler.timesteps
SCREAMING_SNAKE_CASE_ = self.dummy_model()
SCREAMING_SNAKE_CASE_ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
for i, t in enumerate(SCREAMING_SNAKE_CASE_ ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE_ = None
else:
SCREAMING_SNAKE_CASE_ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE_ = scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prev_timestep=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
SCREAMING_SNAKE_CASE_ = pred_prev_sample
SCREAMING_SNAKE_CASE_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1e-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1e-3
def _lowercase (self ):
"""simple docstring"""
pass
def _lowercase (self ):
"""simple docstring"""
pass | 626 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__UpperCamelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
__UpperCamelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__UpperCamelCase = jax.random.PRNGKey(0 )
__UpperCamelCase = 4
__UpperCamelCase = jax.device_count()
__UpperCamelCase = num_samples * [prompt]
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
__UpperCamelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def __lowercase( self ) -> str:
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__UpperCamelCase = jax.random.PRNGKey(0 )
__UpperCamelCase = 50
__UpperCamelCase = jax.device_count()
__UpperCamelCase = num_samples * [prompt]
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __lowercase( self ) -> int:
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
__UpperCamelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__UpperCamelCase = jax.random.PRNGKey(0 )
__UpperCamelCase = 50
__UpperCamelCase = jax.device_count()
__UpperCamelCase = num_samples * [prompt]
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __lowercase( self ) -> Any:
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
__UpperCamelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__UpperCamelCase = jax.random.PRNGKey(0 )
__UpperCamelCase = 50
__UpperCamelCase = jax.device_count()
__UpperCamelCase = num_samples * [prompt]
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __lowercase( self ) -> str:
__UpperCamelCase = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
__UpperCamelCase = scheduler.create_state()
__UpperCamelCase = scheduler_state
__UpperCamelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__UpperCamelCase = jax.random.PRNGKey(0 )
__UpperCamelCase = 50
__UpperCamelCase = jax.device_count()
__UpperCamelCase = num_samples * [prompt]
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __lowercase( self ) -> Optional[Any]:
__UpperCamelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__UpperCamelCase = jax.device_count()
__UpperCamelCase = num_samples * [prompt]
__UpperCamelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__UpperCamelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__UpperCamelCase , __UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
__UpperCamelCase = replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__UpperCamelCase = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 567 |
def _a ( __lowercase ) -> int:
"""simple docstring"""
if not isinstance(__lowercase , __lowercase ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class __magic_name__ ( nn.Module ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :Any , snake_case :Optional[int] ):
'''simple docstring'''
A_ , A_ , A_ , A_ : int = hidden_states.shape
A_ : Any = jax.image.resize(
snake_case , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
A_ : Tuple = self.conv(snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = jnp.floataa
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :List[str] , snake_case :Union[str, Any] ):
'''simple docstring'''
A_ : int = self.conv(snake_case )
return hidden_states
class __magic_name__ ( nn.Module ):
"""simple docstring"""
__UpperCamelCase = 42
__UpperCamelCase = None
__UpperCamelCase = 0.0
__UpperCamelCase = None
__UpperCamelCase = jnp.floataa
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : str = self.in_channels if self.out_channels is None else self.out_channels
A_ : int = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A_ : Tuple = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A_ : Dict = nn.Dense(snake_case , dtype=self.dtype )
A_ : int = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A_ : int = nn.Dropout(self.dropout_prob )
A_ : str = nn.Conv(
snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
A_ : Tuple = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
A_ : List[str] = None
if use_nin_shortcut:
A_ : str = nn.Conv(
snake_case , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self :List[Any] , snake_case :Optional[Any] , snake_case :Optional[Any] , snake_case :List[Any]=True ):
'''simple docstring'''
A_ : Optional[int] = hidden_states
A_ : Optional[int] = self.norma(snake_case )
A_ : List[Any] = nn.swish(snake_case )
A_ : Any = self.conva(snake_case )
A_ : Optional[Any] = self.time_emb_proj(nn.swish(snake_case ) )
A_ : Optional[int] = jnp.expand_dims(jnp.expand_dims(snake_case , 1 ) , 1 )
A_ : List[str] = hidden_states + temb
A_ : str = self.norma(snake_case )
A_ : List[str] = nn.swish(snake_case )
A_ : Union[str, Any] = self.dropout(snake_case , snake_case )
A_ : Tuple = self.conva(snake_case )
if self.conv_shortcut is not None:
A_ : str = self.conv_shortcut(snake_case )
return hidden_states + residual
| 454 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __snake_case ( _lowerCAmelCase : Optional[int] ) -> Dict:
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __snake_case ( _lowerCAmelCase : Tuple ) -> Optional[Any]:
A_ : Optional[Any] = create_tensor(_lowerCAmelCase )
A_ : Dict = gather(_lowerCAmelCase )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __snake_case ( _lowerCAmelCase : List[Any] ) -> Any:
A_ : int = [state.process_index]
A_ : Union[str, Any] = gather_object(_lowerCAmelCase )
assert len(_lowerCAmelCase ) == state.num_processes, f"{gathered_obj}, {len(_lowerCAmelCase )} != {state.num_processes}"
assert gathered_obj == list(range(state.num_processes ) ), f"{gathered_obj} != {list(range(state.num_processes ) )}"
def __snake_case ( _lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
A_ : List[str] = create_tensor(_lowerCAmelCase )
A_ : Optional[Any] = broadcast(_lowerCAmelCase )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __snake_case ( _lowerCAmelCase : Dict ) -> str:
# We need to pad the tensor with one more element if we are the main process
# to ensure that we can pad
if state.is_main_process:
A_ : Tuple = torch.arange(state.num_processes + 1 ).to(state.device )
else:
A_ : List[str] = torch.arange(state.num_processes ).to(state.device )
A_ : Any = pad_across_processes(_lowerCAmelCase )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __snake_case ( _lowerCAmelCase : List[Any] ) -> Tuple:
# For now runs on only two processes
if state.num_processes != 2:
return
A_ : str = create_tensor(_lowerCAmelCase )
A_ : int = reduce(_lowerCAmelCase , "sum" )
A_ : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), f"{reduced_tensor} != {truth_tensor}"
def __snake_case ( _lowerCAmelCase : Union[str, Any] ) -> Dict:
# For now runs on only two processes
if state.num_processes != 2:
return
A_ : List[str] = create_tensor(_lowerCAmelCase )
A_ : Tuple = reduce(_lowerCAmelCase , "mean" )
A_ : List[str] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), f"{reduced_tensor} != {truth_tensor}"
def __snake_case ( _lowerCAmelCase : List[str] ) -> Dict:
# For xla_spawn (TPUs)
main()
def __snake_case ( ) -> List[str]:
A_ : Tuple = PartialState()
state.print(f"State: {state}" )
state.print("testing gather" )
test_gather(_lowerCAmelCase )
state.print("testing gather_object" )
test_gather_object(_lowerCAmelCase )
state.print("testing broadcast" )
test_broadcast(_lowerCAmelCase )
state.print("testing pad_across_processes" )
test_pad_across_processes(_lowerCAmelCase )
state.print("testing reduce_sum" )
test_reduce_sum(_lowerCAmelCase )
state.print("testing reduce_mean" )
test_reduce_mean(_lowerCAmelCase )
if __name__ == "__main__":
main()
| 454 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = XGLMTokenizer
UpperCAmelCase__ : List[str] = XGLMTokenizerFast
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : int = True
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = XGLMTokenizer(_snake_case , keep_accents=_snake_case)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''<pad>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''')
self.assertEqual(vocab_keys[1] , '''<pad>''')
self.assertEqual(len(_snake_case) , 1008)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = XGLMTokenizer(_snake_case , keep_accents=_snake_case)
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_snake_case)
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(_snake_case)
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_snake_case , f.name)
UpperCAmelCase_ = XGLMTokenizer(f.name , keep_accents=_snake_case)
UpperCAmelCase_ = pickle.dumps(_snake_case)
pickle.loads(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
UpperCAmelCase_ = rust_tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
UpperCAmelCase_ = rust_tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
self.assertListEqual(_snake_case , _snake_case)
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(_snake_case)
UpperCAmelCase_ = rust_tokenizer.encode(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [2, 31227, 4447, 35]
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case))
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_snake_case , self.big_tokenizer.encode(_snake_case))
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/xglm-564M''' , padding=_snake_case , )
| 169 |
def A (__A : list , __A : list , __A : int , __A : int , __A : int ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = knapsack(__A , __A , __A , __A , index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ = values[index] + knapsack(
__A , __A , __A , max_weight - weights[index] , index + 1 )
return max(__A , __A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 169 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''nllb-moe'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : int , A_ : Any=128112 , A_ : List[str]=1024 , A_ : Any=12 , A_ : Dict=4096 , A_ : Any=16 , A_ : str=12 , A_ : Union[str, Any]=4096 , A_ : Tuple=16 , A_ : Union[str, Any]=0.05 , A_ : Tuple=0.05 , A_ : Any=True , A_ : Tuple=True , A_ : Tuple="relu" , A_ : List[Any]=1024 , A_ : Union[str, Any]=0.1 , A_ : Optional[int]=0.1 , A_ : Union[str, Any]=0.0 , A_ : int=0.02 , A_ : Any=2 , A_ : Tuple=True , A_ : Union[str, Any]=False , A_ : Any="float32" , A_ : Dict=False , A_ : List[str]=128 , A_ : Dict=64 , A_ : List[Any]=4 , A_ : Tuple=4 , A_ : Any=0.001 , A_ : List[Any]=0.001 , A_ : int="all" , A_ : int=False , A_ : Optional[Any]=False , A_ : Optional[Any]=1.0 , A_ : List[Any]=0.2 , A_ : Any=1 , A_ : Union[str, Any]=0 , A_ : int=2 , A_ : List[str]=False , **A_ : str , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = vocab_size
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ = router_z_loss_coef
lowerCamelCase_ = router_aux_loss_coef
lowerCamelCase_ = decoder_sparse_step
lowerCamelCase_ = encoder_sparse_step
lowerCamelCase_ = num_experts
lowerCamelCase_ = expert_capacity
lowerCamelCase_ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}""" )
lowerCamelCase_ = router_dtype
lowerCamelCase_ = router_ignore_padding_tokens
lowerCamelCase_ = batch_prioritized_routing
lowerCamelCase_ = second_expert_policy
lowerCamelCase_ = normalize_router_prob_before_dropping
lowerCamelCase_ = moe_eval_capacity_token_fraction
lowerCamelCase_ = moe_token_dropout
lowerCamelCase_ = output_router_logits
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
| 70 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lowerCAmelCase : Union[str, Any] = threading.Lock()
_lowerCAmelCase : Optional[logging.Handler] = None
_lowerCAmelCase : Union[str, Any] = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
_lowerCAmelCase : Dict = logging.WARNING
_lowerCAmelCase : Optional[Any] = True
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = os.getenv("TRANSFORMERS_VERBOSITY" , snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
f'has to be one of: { ", ".join(log_levels.keys() ) }' )
return _default_log_level
def SCREAMING_SNAKE_CASE__ ( )-> str:
'''simple docstring'''
return __name__.split("." )[0]
def SCREAMING_SNAKE_CASE__ ( )-> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
UpperCAmelCase__ : Any = logging.StreamHandler() # Set sys.stderr as stream.
UpperCAmelCase__ : Union[str, Any] = sys.stderr.flush
# Apply our default configuration to the library root logger.
UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
UpperCAmelCase__ : int = False
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
global _default_handler
with _lock:
if not _default_handler:
return
UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
UpperCAmelCase__ : Optional[int] = None
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
return log_levels
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None )-> logging.Logger:
'''simple docstring'''
if name is None:
UpperCAmelCase__ : Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> int:
'''simple docstring'''
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> None:
'''simple docstring'''
_configure_library_root_logger()
_get_library_root_logger().setLevel(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> Tuple:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> str:
'''simple docstring'''
return set_verbosity(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( snake_case : logging.Handler )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : logging.Handler )-> None:
'''simple docstring'''
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
UpperCAmelCase__ : Dict = False
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
_configure_library_root_logger()
UpperCAmelCase__ : List[Any] = True
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
UpperCAmelCase__ : Union[str, Any] = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(snake_case )
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : Any = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , *snake_case : List[str] , **snake_case : str )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , snake_case )
if no_advisory_warnings:
return
self.warning(*snake_case , **snake_case )
_lowerCAmelCase : int = warning_advice
@functools.lru_cache(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , *snake_case : Dict , **snake_case : Any )-> Any:
'''simple docstring'''
self.warning(*snake_case , **snake_case )
_lowerCAmelCase : Tuple = warning_once
class lowerCAmelCase__ :
def __init__( self : List[str] , *snake_case__ : Any , **snake_case__ : List[str] ): # pylint: disable=unused-argument
'''simple docstring'''
UpperCAmelCase__ : List[Any] = args[0] if args else None
def __iter__( self : Any ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__( self : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
def empty_fn(*snake_case__ : Dict , **snake_case__ : str ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ):
'''simple docstring'''
return self
def __exit__( self : Tuple , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : Optional[int] ):
'''simple docstring'''
return
class lowerCAmelCase__ :
def __call__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm(*snake_case__ , **snake_case__ )
else:
return EmptyTqdm(*snake_case__ , **snake_case__ )
def __a ( self : Dict , *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*snake_case__ , **snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
_lowerCAmelCase : Optional[int] = _tqdm_cls()
def SCREAMING_SNAKE_CASE__ ( )-> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
global _tqdm_active
UpperCAmelCase__ : int = True
hf_hub_utils.enable_progress_bars()
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
global _tqdm_active
UpperCAmelCase__ : Optional[Any] = False
hf_hub_utils.disable_progress_bars()
| 438 | 0 |
import math
def _a ( SCREAMING_SNAKE_CASE = 1_00 ):
"""simple docstring"""
lowercase__ = sum(i * i for i in range(1 , n + 1 ) )
lowercase__ = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 718 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
lowerCAmelCase = 5
lowerCAmelCase = 10
@require_sentencepiece
@require_tokenizers
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : List[str] = SpeechaTextTokenizer
_lowercase : List[str] = False
_lowercase : Optional[Any] = True
def lowerCamelCase_ ( self: Optional[Any] ) -> Any:
"""simple docstring"""
super().setUp()
lowercase__ = sp.SentencePieceProcessor()
spm_model.Load(UpperCamelCase_ )
lowercase__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>''']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCamelCase_ ) )]
lowercase__ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowercase__ = Path(self.tmpdirname )
save_json(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase_ , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
lowercase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = '''<pad>'''
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCamelCase_ ( self: Any ) -> str:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(UpperCamelCase_ ) , 1_001 )
def lowerCamelCase_ ( self: List[Any] ) -> str:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_001 )
def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
"""simple docstring"""
lowercase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowercase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [289, 50, 14, 174, 386] , )
lowercase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def lowerCamelCase_ ( self: int ) -> Dict:
"""simple docstring"""
lowercase__ = {'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='''facebook/s2t-small-mustc-en-de-st''' , revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , )
@require_sentencepiece
class _a ( unittest.TestCase ):
_lowercase : Union[str, Any] = '''valhalla/s2t_mustc_multilinguial_medium'''
_lowercase : int = '''C\'est trop cool'''
_lowercase : Any = '''Esto es genial'''
@classmethod
def lowerCamelCase_ ( cls: str ) -> Optional[int]:
"""simple docstring"""
lowercase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def lowerCamelCase_ ( self: int ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11 )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10_000 )
def lowerCamelCase_ ( self: Union[str, Any] ) -> str:
"""simple docstring"""
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowercase__ = [ES_CODE, 4, 1_601, 47, 7_647, 2]
lowercase__ = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowercase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def lowerCamelCase_ ( self: int ) -> List[Any]:
"""simple docstring"""
lowercase__ = '''fr'''
lowercase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , UpperCamelCase_ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def lowerCamelCase_ ( self: Dict ) -> List[Any]:
"""simple docstring"""
lowercase__ = '''fr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowercase__ = '''es'''
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 429 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowerCamelCase = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 288 |
'''simple docstring'''
from math import pow
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, ) -> tuple[int, int]:
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
A_ = int(pow(UpperCAmelCase__, UpperCAmelCase__ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
A_ , A_ = backtrack(
UpperCAmelCase__, UpperCAmelCase__, current_number + 1, UpperCAmelCase__, UpperCAmelCase__ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
A_ , A_ = backtrack(
UpperCAmelCase__, UpperCAmelCase__, current_number + 1, UpperCAmelCase__, UpperCAmelCase__ )
return current_sum, solutions_count
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
raise ValueError(
"""Invalid input\n"""
"""needed_sum must be between 1 and 1000, power between 2 and 10.""" )
return backtrack(UpperCAmelCase__, UpperCAmelCase__, 1, 0, 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 | 1 |
from __future__ import annotations
import math
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : bool , __lowerCAmelCase : list[int] , __lowerCAmelCase : float ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(__lowerCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , )
def __lowercase ( ):
a__ = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
a__ = math.log(len(__lowerCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__a: Union[str, Any] = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def _SCREAMING_SNAKE_CASE ( __snake_case = "dhaka" , __snake_case = 5 ) -> int:
_UpperCAmelCase = min(__snake_case , 5_0 ) # Prevent abuse!
_UpperCAmelCase = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
_UpperCAmelCase = requests.get("""https://www.google.com/search""" , params=__snake_case , headers=__snake_case )
_UpperCAmelCase = BeautifulSoup(html.text , """html.parser""" )
_UpperCAmelCase = """""".join(
re.findall(r"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
_UpperCAmelCase = json.dumps(__snake_case )
_UpperCAmelCase = json.loads(__snake_case )
_UpperCAmelCase = re.findall(
r"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , __snake_case , )
if not matched_google_image_data:
return 0
_UpperCAmelCase = re.sub(
r"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(__snake_case ) , )
_UpperCAmelCase = re.findall(
r"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , __snake_case , )
for index, fixed_full_res_image in enumerate(__snake_case ):
if index >= max_images:
return index
_UpperCAmelCase = bytes(__snake_case , """ascii""" ).decode(
"""unicode-escape""" )
_UpperCAmelCase = bytes(__snake_case , """ascii""" ).decode(
"""unicode-escape""" )
_UpperCAmelCase = urllib.request.build_opener()
_UpperCAmelCase = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(__snake_case )
_UpperCAmelCase = f"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(__snake_case ):
os.makedirs(__snake_case )
urllib.request.urlretrieve( # noqa: S310
__snake_case , f"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
__a: Optional[int] = download_images_from_google_query(sys.argv[1])
print(F"{image_count} images were downloaded to disk.")
except IndexError:
print('''Please provide a search term.''')
raise | 108 |
'''simple docstring'''
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Tuple = len(SCREAMING_SNAKE_CASE__ )
__a : int = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
__a : int = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
__a : Union[str, Any] = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
__a : List[str] = subset[i - 1][j]
if arr[i - 1] <= j:
__a : Union[str, Any] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 597 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 697 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def _snake_case ( lowercase ) -> Callable:
@wraps(lowercase )
def _inner_fn(*lowercase , **lowercase ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase , )
return fn(*lowercase , **lowercase )
return _inner_fn | 697 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def _lowerCamelCase( __snake_case , __snake_case=False ) -> List[Any]:
__snake_case = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
__snake_case = "segformer.encoder." + key
if key.startswith("backbone" ):
__snake_case = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__snake_case = key[key.find("patch_embed" ) + len("patch_embed" )]
__snake_case = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(__snake_case )-1}""" )
if "norm" in key:
__snake_case = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__snake_case = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
__snake_case = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(__snake_case )-1}""" )
if "layer_norm1" in key:
__snake_case = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
__snake_case = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
__snake_case = key[key.find("block" ) + len("block" )]
__snake_case = key.replace(f"""block{idx}""" , f"""block.{int(__snake_case )-1}""" )
if "attn.q" in key:
__snake_case = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
__snake_case = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
__snake_case = key.replace("attn" , "attention.self" )
if "fc1" in key:
__snake_case = key.replace("fc1" , "dense1" )
if "fc2" in key:
__snake_case = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
__snake_case = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
__snake_case = key.replace("linear_fuse.conv" , "linear_fuse" )
__snake_case = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__snake_case = key[key.find("linear_c" ) + len("linear_c" )]
__snake_case = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(__snake_case )-1}""" )
if key.startswith("head" ):
__snake_case = key.replace("head" , "classifier" )
__snake_case = value
return new_state_dict
def _lowerCamelCase( __snake_case , __snake_case ) -> Optional[Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__snake_case = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
__snake_case = state_dict.pop(f"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
__snake_case = kv_weight[
: config.hidden_sizes[i], :
]
__snake_case = kv_bias[: config.hidden_sizes[i]]
__snake_case = kv_weight[
config.hidden_sizes[i] :, :
]
__snake_case = kv_bias[
config.hidden_sizes[i] :
]
def _lowerCamelCase( ) -> Any:
__snake_case = "http://images.cocodataset.org/val2017/000000039769.jpg"
__snake_case = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return image
@torch.no_grad()
def _lowerCamelCase( __snake_case , __snake_case , __snake_case ) -> str:
__snake_case = SegformerConfig()
__snake_case = False
# set attributes based on model_name
__snake_case = "huggingface/label-files"
if "segformer" in model_name:
__snake_case = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
__snake_case = 150
__snake_case = "ade20k-id2label.json"
__snake_case = (1, 150, 128, 128)
elif "city" in model_name:
__snake_case = 19
__snake_case = "cityscapes-id2label.json"
__snake_case = (1, 19, 128, 128)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
__snake_case = True
__snake_case = model_name[4:6]
__snake_case = 1000
__snake_case = "imagenet-1k-id2label.json"
__snake_case = (1, 1000)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
__snake_case = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
__snake_case = {int(__snake_case ): v for k, v in idalabel.items()}
__snake_case = idalabel
__snake_case = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
__snake_case = [64, 128, 320, 512]
__snake_case = 256
elif size == "b2":
__snake_case = [64, 128, 320, 512]
__snake_case = 768
__snake_case = [3, 4, 6, 3]
elif size == "b3":
__snake_case = [64, 128, 320, 512]
__snake_case = 768
__snake_case = [3, 4, 18, 3]
elif size == "b4":
__snake_case = [64, 128, 320, 512]
__snake_case = 768
__snake_case = [3, 8, 27, 3]
elif size == "b5":
__snake_case = [64, 128, 320, 512]
__snake_case = 768
__snake_case = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
__snake_case = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=__snake_case , align=__snake_case , do_random_crop=__snake_case )
# prepare image
__snake_case = prepare_img()
__snake_case = image_processor(images=__snake_case , return_tensors="pt" ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
__snake_case = torch.load(__snake_case , map_location=torch.device("cpu" ) )
else:
__snake_case = torch.load(__snake_case , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
__snake_case = rename_keys(__snake_case , encoder_only=__snake_case )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(__snake_case , __snake_case )
# create HuggingFace model and load state dict
if encoder_only:
__snake_case = False
__snake_case = SegformerForImageClassification(__snake_case )
else:
__snake_case = SegformerForSemanticSegmentation(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# forward pass
__snake_case = model(__snake_case )
__snake_case = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
__snake_case = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
__snake_case = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
__snake_case = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
__snake_case = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
__snake_case = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
__snake_case = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
__snake_case = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
__snake_case = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
__snake_case = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
__snake_case = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
__snake_case = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
__snake_case = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
__snake_case = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
__snake_case = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
__snake_case = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
__snake_case = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
model.save_pretrained(__snake_case )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
lowerCamelCase__ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 524 | import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
lowerCamelCase__ = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
lowerCamelCase__ = '▁'
# Segments (not really needed)
lowerCamelCase__ = 0
lowerCamelCase__ = 1
lowerCamelCase__ = 2
lowerCamelCase__ = 3
lowerCamelCase__ = 4
class UpperCamelCase ( snake_case__ ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = """left"""
__UpperCamelCase = XLNetTokenizer
def __init__( self : List[str] ,_lowerCAmelCase : List[Any]=None ,_lowerCAmelCase : Optional[Any]=None ,_lowerCAmelCase : Optional[Any]=False ,_lowerCAmelCase : int=True ,_lowerCAmelCase : List[str]=False ,_lowerCAmelCase : Union[str, Any]="<s>" ,_lowerCAmelCase : str="</s>" ,_lowerCAmelCase : Tuple="<unk>" ,_lowerCAmelCase : Tuple="<sep>" ,_lowerCAmelCase : str="<pad>" ,_lowerCAmelCase : Any="<cls>" ,_lowerCAmelCase : Any="<mask>" ,_lowerCAmelCase : List[Any]=["<eop>", "<eod>"] ,**_lowerCAmelCase : Any ,):
"""simple docstring"""
__snake_case = AddedToken(_lowerCAmelCase ,lstrip=_lowerCAmelCase ,rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase ,_lowerCAmelCase ) else mask_token
super().__init__(
vocab_file=_lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,remove_space=_lowerCAmelCase ,keep_accents=_lowerCAmelCase ,bos_token=_lowerCAmelCase ,eos_token=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,additional_special_tokens=_lowerCAmelCase ,**_lowerCAmelCase ,)
__snake_case = 3
__snake_case = do_lower_case
__snake_case = remove_space
__snake_case = keep_accents
__snake_case = vocab_file
__snake_case = False if not self.vocab_file else True
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : List[int] ,_lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCamelCase_ ( self : Any ,_lowerCAmelCase : List[int] ,_lowerCAmelCase : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCamelCase_ ( self : List[Any] ,_lowerCAmelCase : str ,_lowerCAmelCase : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__snake_case = os.path.join(
_lowerCAmelCase ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file ,_lowerCAmelCase )
return (out_vocab_file,)
| 524 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase ) -> Dict:
a__ : List[Any] = checkpoint
a__ : List[Any] = {}
a__ : int = vae_state_dict["encoder.conv_in.weight"]
a__ : Any = vae_state_dict["encoder.conv_in.bias"]
a__ : Union[str, Any] = vae_state_dict["encoder.conv_out.weight"]
a__ : Optional[Any] = vae_state_dict["encoder.conv_out.bias"]
a__ : List[str] = vae_state_dict["encoder.norm_out.weight"]
a__ : Optional[int] = vae_state_dict["encoder.norm_out.bias"]
a__ : Optional[Any] = vae_state_dict["decoder.conv_in.weight"]
a__ : Dict = vae_state_dict["decoder.conv_in.bias"]
a__ : Union[str, Any] = vae_state_dict["decoder.conv_out.weight"]
a__ : Optional[int] = vae_state_dict["decoder.conv_out.bias"]
a__ : Dict = vae_state_dict["decoder.norm_out.weight"]
a__ : int = vae_state_dict["decoder.norm_out.bias"]
a__ : Any = vae_state_dict["quant_conv.weight"]
a__ : Any = vae_state_dict["quant_conv.bias"]
a__ : str = vae_state_dict["post_quant_conv.weight"]
a__ : Any = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
a__ : Dict = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
a__ : Optional[int] = {
layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
a__ : Dict = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
a__ : int = {
layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(__UpperCamelCase )
}
for i in range(__UpperCamelCase ):
a__ : Tuple = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
a__ : Optional[Any] = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.weight' )
a__ : List[str] = vae_state_dict.pop(
F'encoder.down.{i}.downsample.conv.bias' )
a__ : Optional[Any] = renew_vae_resnet_paths(__UpperCamelCase )
a__ : List[str] = {"old": F'down.{i}.block', "new": F'down_blocks.{i}.resnets'}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
a__ : Any = [key for key in vae_state_dict if "encoder.mid.block" in key]
a__ : Optional[int] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a__ : List[Any] = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
a__ : Dict = renew_vae_resnet_paths(__UpperCamelCase )
a__ : Union[str, Any] = {"old": F'mid.block_{i}', "new": F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
a__ : Dict = [key for key in vae_state_dict if "encoder.mid.attn" in key]
a__ : Optional[int] = renew_vae_attention_paths(__UpperCamelCase )
a__ : str = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
conv_attn_to_linear(__UpperCamelCase )
for i in range(__UpperCamelCase ):
a__ : Optional[Any] = num_up_blocks - 1 - i
a__ : str = [
key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
]
if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
a__ : Dict = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.weight'
]
a__ : Optional[int] = vae_state_dict[
F'decoder.up.{block_id}.upsample.conv.bias'
]
a__ : int = renew_vae_resnet_paths(__UpperCamelCase )
a__ : Optional[Any] = {"old": F'up.{block_id}.block', "new": F'up_blocks.{i}.resnets'}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
a__ : Union[str, Any] = [key for key in vae_state_dict if "decoder.mid.block" in key]
a__ : Dict = 2
for i in range(1 , num_mid_res_blocks + 1 ):
a__ : List[str] = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
a__ : List[str] = renew_vae_resnet_paths(__UpperCamelCase )
a__ : Union[str, Any] = {"old": F'mid.block_{i}', "new": F'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
a__ : int = [key for key in vae_state_dict if "decoder.mid.attn" in key]
a__ : str = renew_vae_attention_paths(__UpperCamelCase )
a__ : List[str] = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
conv_attn_to_linear(__UpperCamelCase )
return new_checkpoint
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , ) -> str:
# Only support V1
a__ : Optional[int] = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
a__ : Any = io.BytesIO(r.content )
a__ : int = OmegaConf.load(__UpperCamelCase )
a__ : Any = 5_12
a__ : str = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
a__ : Optional[Any] = {}
with safe_open(__UpperCamelCase , framework="pt" , device="cpu" ) as f:
for key in f.keys():
a__ : Tuple = f.get_tensor(__UpperCamelCase )
else:
a__ : List[Any] = torch.load(__UpperCamelCase , map_location=__UpperCamelCase )["state_dict"]
# Convert the VAE model.
a__ : Optional[int] = create_vae_diffusers_config(__UpperCamelCase , image_size=__UpperCamelCase )
a__ : List[Any] = custom_convert_ldm_vae_checkpoint(__UpperCamelCase , __UpperCamelCase )
a__ : Optional[int] = AutoencoderKL(**__UpperCamelCase )
vae.load_state_dict(__UpperCamelCase )
vae.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 710 |
import string
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> str:
a__ : Optional[int] = ""
for i in sequence:
a__ : List[str] = ord(__UpperCamelCase )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> str:
a__ : Dict = string.ascii_letters
a__ : Optional[Any] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(__UpperCamelCase )] if c in letters else c for c in sequence )
def SCREAMING_SNAKE_CASE( ) -> None:
from timeit import timeit
print("Running performance benchmarks..." )
a__ : Optional[Any] = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'> atbash_slow(): {timeit("atbash_slow(printable)" , setup=__UpperCamelCase )} seconds' )
print(F'> atbash(): {timeit("atbash(printable)" , setup=__UpperCamelCase )} seconds' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 207 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A_ ( _a ):
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class A_ ( _a , _a ):
lowerCAmelCase__ = 1
@register_to_config
def __init__( self: List[Any] ,__lowerCAmelCase: int = 2_000 ,__lowerCAmelCase: float = 0.15 ,__lowerCAmelCase: float = 0.01 ,__lowerCAmelCase: float = 13_48.0 ,__lowerCAmelCase: float = 1e-5 ,__lowerCAmelCase: int = 1 ,):
'''simple docstring'''
_lowerCamelCase : int = sigma_max
# setable values
_lowerCamelCase : Tuple = None
self.set_sigmas(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[int] = None ):
'''simple docstring'''
return sample
def _lowercase ( self: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: float = None ,__lowerCAmelCase: Union[str, torch.device] = None ):
'''simple docstring'''
_lowerCamelCase : str = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_lowerCamelCase : str = torch.linspace(1 ,__lowerCAmelCase ,__lowerCAmelCase ,device=__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: int ,__lowerCAmelCase: float = None ,__lowerCAmelCase: float = None ,__lowerCAmelCase: float = None ):
'''simple docstring'''
_lowerCamelCase : List[str] = sigma_min if sigma_min is not None else self.config.sigma_min
_lowerCamelCase : List[Any] = sigma_max if sigma_max is not None else self.config.sigma_max
_lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Dict = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_lowerCamelCase : str = torch.exp(torch.linspace(math.log(__lowerCAmelCase ) ,math.log(__lowerCAmelCase ) ,__lowerCAmelCase ) )
_lowerCamelCase : Tuple = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return torch.where(
timesteps == 0 ,torch.zeros_like(t.to(timesteps.device ) ) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device ) ,)
def _lowercase ( self: Tuple ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: int ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[torch.Generator] = None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
_lowerCamelCase : Optional[int] = timestep * torch.ones(
sample.shape[0] ,device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_lowerCamelCase : Dict = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_lowerCamelCase : Any = timesteps.to(self.discrete_sigmas.device )
_lowerCamelCase : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device )
_lowerCamelCase : Optional[Any] = self.get_adjacent_sigma(__lowerCAmelCase ,__lowerCAmelCase ).to(sample.device )
_lowerCamelCase : Optional[Any] = torch.zeros_like(__lowerCAmelCase )
_lowerCamelCase : Tuple = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_lowerCamelCase : Optional[int] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_lowerCamelCase : Tuple = diffusion.unsqueeze(-1 )
_lowerCamelCase : Any = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_lowerCamelCase : str = randn_tensor(
sample.shape ,layout=sample.layout ,generator=__lowerCAmelCase ,device=sample.device ,dtype=sample.dtype )
_lowerCamelCase : int = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_lowerCamelCase : Optional[int] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__lowerCAmelCase ,prev_sample_mean=__lowerCAmelCase )
def _lowercase ( self: str ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: Optional[torch.Generator] = None ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_lowerCamelCase : Optional[Any] = randn_tensor(sample.shape ,layout=sample.layout ,generator=__lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_lowerCamelCase : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] ,-1 ) ,dim=-1 ).mean()
_lowerCamelCase : List[Any] = torch.norm(noise.reshape(noise.shape[0] ,-1 ) ,dim=-1 ).mean()
_lowerCamelCase : Optional[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_lowerCamelCase : Optional[Any] = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_lowerCamelCase : Dict = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_lowerCamelCase : Optional[int] = step_size.unsqueeze(-1 )
_lowerCamelCase : Any = sample + step_size * model_output
_lowerCamelCase : List[str] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCAmelCase )
def _lowercase ( self: List[str] ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,__lowerCAmelCase: torch.FloatTensor ,):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = timesteps.to(original_samples.device )
_lowerCamelCase : str = self.discrete_sigmas.to(original_samples.device )[timesteps]
_lowerCamelCase : Optional[Any] = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__lowerCAmelCase ) * sigmas[:, None, None, None]
)
_lowerCamelCase : Optional[Any] = noise + original_samples
return noisy_samples
def __len__( self: List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps | 46 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__a : Optional[int] = DistilBertTokenizer
__a : Union[str, Any] = DistilBertTokenizerFast
__a : Any = True
@slow
def _UpperCAmelCase ( self ):
UpperCamelCase_ : List[Any] = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
UpperCamelCase_ : Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=__lowerCAmelCase )
UpperCamelCase_ : int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__lowerCAmelCase )
UpperCamelCase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
UpperCamelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 208 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ :
def __init__( self : str , _A : Any , _A : int=12 , _A : int=7 , _A : str=True , _A : str=True , _A : Union[str, Any]=True , _A : Optional[Any]=99 , _A : Optional[Any]=32 , _A : Union[str, Any]=32 , _A : Tuple=2 , _A : List[Any]=4 , _A : Optional[int]=37 , _A : Optional[int]=0.1 , _A : int=0.1 , _A : List[Any]=5_12 , _A : Dict=0.02 , _A : List[Any]=0 , _A : Tuple=None , ) -> Any:
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Tuple = seq_length
UpperCAmelCase_ : Any = is_training
UpperCAmelCase_ : int = use_input_mask
UpperCAmelCase_ : Dict = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Union[str, Any] = projection_dim
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Dict = dropout
UpperCAmelCase_ : Tuple = attention_dropout
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : Union[str, Any] = bos_token_id
def A ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase_ : Dict = input_mask.numpy()
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = input_mask.shape
UpperCAmelCase_ : Union[str, Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
UpperCAmelCase_ : List[Any] = 1
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Tuple = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def A ( self : Union[str, Any] ) -> Tuple:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def A ( self : Tuple , _A : Tuple , _A : str , _A : str ) -> Dict:
UpperCAmelCase_ : str = TFBlipTextModel(config=_A )
UpperCAmelCase_ : Optional[int] = model(_A , attention_mask=_A , training=_A )
UpperCAmelCase_ : Any = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : int ) -> Dict:
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = (TFBlipTextModel,) if is_tf_available() else ()
a_ = False
a_ = False
a_ = False
def A ( self : Union[str, Any] ) -> int:
UpperCAmelCase_ : str = BlipTextModelTester(self )
UpperCAmelCase_ : str = ConfigTester(self , config_class=_A , hidden_size=37 )
def A ( self : Tuple ) -> Optional[Any]:
self.config_tester.run_common_tests()
def A ( self : List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Tuple ) -> List[str]:
pass
def A ( self : List[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def A ( self : Tuple ) -> Tuple:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def A ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def A ( self : List[str] ) -> List[str]:
pass
@slow
def A ( self : str ) -> str:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : str = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def A ( self : str , _A : Any=True ) -> Dict:
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
| 216 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_UpperCamelCase : Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : List[str] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 216 | 1 |
'''simple docstring'''
from __future__ import annotations
snake_case_ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def __lowercase (_SCREAMING_SNAKE_CASE :list[list[int]] , _SCREAMING_SNAKE_CASE :list[int] , _SCREAMING_SNAKE_CASE :list[int] , _SCREAMING_SNAKE_CASE :int , _SCREAMING_SNAKE_CASE :list[list[int]] , ):
SCREAMING_SNAKE_CASE : int = [
[0 for col in range(len(grid[0] ) )] for row in range(len(A_ ) )
] # the reference grid
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(A_ ) )
] # the action grid
SCREAMING_SNAKE_CASE : Optional[Any] = init[0]
SCREAMING_SNAKE_CASE : Any = init[1]
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Any = g + heuristic[x][y] # cost from starting cell to destination cell
SCREAMING_SNAKE_CASE : Union[str, Any] = [[f, g, x, y]]
SCREAMING_SNAKE_CASE : List[Any] = False # flag that is set when search is complete
SCREAMING_SNAKE_CASE : int = False # flag set if we can't find expand
while not found and not resign:
if len(A_ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
SCREAMING_SNAKE_CASE : int = cell.pop()
SCREAMING_SNAKE_CASE : List[str] = next_cell[2]
SCREAMING_SNAKE_CASE : Union[str, Any] = next_cell[3]
SCREAMING_SNAKE_CASE : int = next_cell[1]
if x == goal[0] and y == goal[1]:
SCREAMING_SNAKE_CASE : Optional[Any] = True
else:
for i in range(len(A_ ) ): # to try out different valid actions
SCREAMING_SNAKE_CASE : str = x + DIRECTIONS[i][0]
SCREAMING_SNAKE_CASE : List[str] = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(A_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
SCREAMING_SNAKE_CASE : Dict = g + cost
SCREAMING_SNAKE_CASE : List[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : Optional[Any] = i
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : List[Any] = goal[0]
SCREAMING_SNAKE_CASE : Optional[Any] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
SCREAMING_SNAKE_CASE : List[Any] = x - DIRECTIONS[action[x][y]][0]
SCREAMING_SNAKE_CASE : Tuple = y - DIRECTIONS[action[x][y]][1]
SCREAMING_SNAKE_CASE : List[str] = xa
SCREAMING_SNAKE_CASE : Union[str, Any] = ya
invpath.append([x, y] )
SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(len(A_ ) ):
path.append(invpath[len(A_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
snake_case_ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
snake_case_ = [0, 0]
# all coordinates are given in format [y,x]
snake_case_ = [len(grid) - 1, len(grid[0]) - 1]
snake_case_ = 1
# the cost map which pushes the path closer to the goal
snake_case_ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
snake_case_ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
snake_case_ = 99
snake_case_ , snake_case_ = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 507 | import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def __lowerCAmelCase ( A_ : Features ) -> Optional[int]:
__UpperCAmelCase = np.inf
def set_batch_size(A_ : FeatureType ) -> None:
nonlocal batch_size
if isinstance(A_ , A_ ):
__UpperCAmelCase = min(A_ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(A_ , A_ ):
__UpperCAmelCase = min(A_ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(A_ , A_ ) and feature.dtype == "binary":
__UpperCAmelCase = min(A_ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(A_ , A_ )
return None if batch_size is np.inf else batch_size
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
def __init__( self: Optional[Any] , __lowerCAmelCase: NestedDataStructureLike[PathLike] , __lowerCAmelCase: Optional[NamedSplit] = None , __lowerCAmelCase: Optional[Features] = None , __lowerCAmelCase: str = None , __lowerCAmelCase: bool = False , __lowerCAmelCase: bool = False , __lowerCAmelCase: Optional[int] = None , **__lowerCAmelCase: List[Any] , ) -> Any:
'''simple docstring'''
super().__init__(
__lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , )
__UpperCAmelCase = path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths}
__UpperCAmelCase = _PACKAGED_DATASETS_MODULES["parquet"][1]
__UpperCAmelCase = Parquet(
cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , hash=__lowerCAmelCase , **__lowerCAmelCase , )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.streaming:
__UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , )
__UpperCAmelCase = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self: Optional[int] , __lowerCAmelCase: Dataset , __lowerCAmelCase: Union[PathLike, BinaryIO] , __lowerCAmelCase: Optional[int] = None , **__lowerCAmelCase: Optional[int] , ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = dataset
__UpperCAmelCase = path_or_buf
__UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features )
__UpperCAmelCase = parquet_writer_kwargs
def _UpperCAmelCase ( self: Optional[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
__UpperCAmelCase = self._write(file_obj=__lowerCAmelCase , batch_size=__lowerCAmelCase , **self.parquet_writer_kwargs )
else:
__UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__lowerCAmelCase , **self.parquet_writer_kwargs )
return written
def _UpperCAmelCase ( self: Tuple , __lowerCAmelCase: BinaryIO , __lowerCAmelCase: int , **__lowerCAmelCase: List[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = parquet_writer_kwargs.pop("path_or_buf" , __lowerCAmelCase )
__UpperCAmelCase = self.dataset.features.arrow_schema
__UpperCAmelCase = pq.ParquetWriter(__lowerCAmelCase , schema=__lowerCAmelCase , **__lowerCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __lowerCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
__UpperCAmelCase = query_table(
table=self.dataset._data , key=slice(__lowerCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written
| 221 | 0 |
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def snake_case ( snake_case__ :Optional[int] , snake_case__ :Union[str, Any] , snake_case__ :Dict) -> List[str]:
_A = AutoConfig.from_pretrained(lowerCamelCase_)
_A = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_)
_A = checkpoints.load_tax_checkpoint(lowerCamelCase_)
_A = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_A = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_A = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_A = 'TransientGlobalSelfAttention'
else:
raise ValueError(
"""Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`"""
""" attribute with a value from [\'local\', \'transient-global].""")
# Encoder
for layer_index in range(config.num_layers):
_A = F'''layers_{str(lowerCamelCase_)}'''
# Self-Attention
_A = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_A = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_A = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_A = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_A = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_A = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_A = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_A = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_A = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_A = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_A = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_A = flax_model.params['encoder']['block'][str(lowerCamelCase_)]['layer']
_A = tax_attention_key
_A = tax_attention_out
_A = tax_attention_query
_A = tax_attention_value
_A = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_A = tax_global_layer_norm
if split_mlp_wi:
_A = tax_mlp_wi_a
_A = tax_mlp_wi_a
else:
_A = tax_mlp_wi
_A = tax_mlp_wo
_A = tax_mlp_layer_norm
_A = flax_model_encoder_layer_block
# Only for layer 0:
_A = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_A = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_A = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_A = tax_encoder_global_rel_embedding
# Assigning
_A = tax_model['target']['encoder']['encoder_norm']['scale']
_A = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
_A = F'''layers_{str(lowerCamelCase_)}'''
# Self-Attention
_A = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_A = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_A = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_A = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_A = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_A = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_A = tax_enc_dec_attention_module['key']['kernel']
_A = tax_enc_dec_attention_module['out']['kernel']
_A = tax_enc_dec_attention_module['query']['kernel']
_A = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_A = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_A = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_A = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_A = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_A = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_A = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
_A = flax_model.params['decoder']['block'][str(lowerCamelCase_)]['layer']
_A = tax_attention_key
_A = tax_attention_out
_A = tax_attention_query
_A = tax_attention_value
_A = tax_pre_attention_layer_norm
_A = tax_enc_dec_attention_key
_A = tax_enc_dec_attention_out
_A = tax_enc_dec_attention_query
_A = tax_enc_dec_attention_value
_A = tax_cross_layer_norm
if split_mlp_wi:
_A = tax_mlp_wi_a
_A = tax_mlp_wi_a
else:
_A = tax_mlp_wi
_A = tax_mlp_wo
_A = txa_mlp_layer_norm
_A = flax_model_decoder_layer_block
# Decoder Normalization
_A = tax_model['target']['decoder']['decoder_norm']['scale']
_A = txa_decoder_norm
# Only for layer 0:
_A = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_A = tax_decoder_rel_embedding
# Token Embeddings
_A = tax_model['target']['token_embedder']['embedding']
_A = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_A = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(lowerCamelCase_)
print("""T5X Model was sucessfully converted!""")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 706 | def snake_case ( snake_case__ :int , snake_case__ :int) -> int:
return int(input_a == input_a == 0)
def snake_case ( ) -> None:
print("""Truth Table of NOR Gate:""")
print("""| Input 1 | Input 2 | Output |""")
print(F'''| 0 | 0 | {nor_gate(0 , 0)} |''')
print(F'''| 0 | 1 | {nor_gate(0 , 1)} |''')
print(F'''| 1 | 0 | {nor_gate(1 , 0)} |''')
print(F'''| 1 | 1 | {nor_gate(1 , 1)} |''')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 83 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
_SCREAMING_SNAKE_CASE = 'MobileNetV1Config'
# Base docstring
_SCREAMING_SNAKE_CASE = 'google/mobilenet_v1_1.0_224'
_SCREAMING_SNAKE_CASE = [1, 1_0_2_4, 7, 7]
# Image classification docstring
_SCREAMING_SNAKE_CASE = 'google/mobilenet_v1_1.0_224'
_SCREAMING_SNAKE_CASE = 'tabby, tabby cat'
_SCREAMING_SNAKE_CASE = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = {}
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase = model.mobilenet_va
else:
UpperCamelCase = model
UpperCamelCase = """MobilenetV1/Conv2d_0/"""
UpperCamelCase = backbone.conv_stem.convolution.weight
UpperCamelCase = backbone.conv_stem.normalization.bias
UpperCamelCase = backbone.conv_stem.normalization.weight
UpperCamelCase = backbone.conv_stem.normalization.running_mean
UpperCamelCase = backbone.conv_stem.normalization.running_var
for i in range(13 ):
UpperCamelCase = i + 1
UpperCamelCase = i * 2
UpperCamelCase = backbone.layer[pt_index]
UpperCamelCase = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
UpperCamelCase = backbone.layer[pt_index + 1]
UpperCamelCase = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
UpperCamelCase = model.classifier.weight
UpperCamelCase = model.classifier.bias
return tf_to_pt_map
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
UpperCamelCase = tf.train.list_variables(_lowerCamelCase )
UpperCamelCase = {}
for name, shape in init_vars:
logger.info(f"""Loading TF weight {name} with shape {shape}""" )
UpperCamelCase = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase = array
# Build TF to PyTorch weights loading map
UpperCamelCase = _build_tf_to_pytorch_map(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"""Importing {name}""" )
if name not in tf_weights:
logger.info(f"""{name} not in tf pre-trained weights, skipping""" )
continue
UpperCamelCase = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
UpperCamelCase = np.transpose(_lowerCamelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
UpperCamelCase = array.squeeze().transpose()
else:
UpperCamelCase = np.transpose(_lowerCamelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" )
UpperCamelCase = torch.from_numpy(_lowerCamelCase )
tf_weights.pop(_lowerCamelCase , _lowerCamelCase )
tf_weights.pop(name + """/RMSProp""" , _lowerCamelCase )
tf_weights.pop(name + """/RMSProp_1""" , _lowerCamelCase )
tf_weights.pop(name + """/ExponentialMovingAverage""" , _lowerCamelCase )
logger.info(f"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> torch.Tensor:
'''simple docstring'''
UpperCamelCase = features.shape[-2:]
UpperCamelCase = conv_layer.stride
UpperCamelCase = conv_layer.kernel_size
if in_height % stride_height == 0:
UpperCamelCase = max(kernel_height - stride_height , 0 )
else:
UpperCamelCase = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
UpperCamelCase = max(kernel_width - stride_width , 0 )
else:
UpperCamelCase = max(kernel_width - (in_width % stride_width) , 0 )
UpperCamelCase = pad_along_width // 2
UpperCamelCase = pad_along_width - pad_left
UpperCamelCase = pad_along_height // 2
UpperCamelCase = pad_along_height - pad_top
UpperCamelCase = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , """constant""" , 0.0 )
class SCREAMING_SNAKE_CASE_ ( nn.Module ):
def __init__( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Any = 1 , lowerCamelCase_ : str = 1 , lowerCamelCase_ : Optional[Any] = False , lowerCamelCase_ : List[str] = True , lowerCamelCase_ : Optional[Any] = True , ):
"""simple docstring"""
super().__init__()
UpperCamelCase = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
UpperCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
UpperCamelCase = nn.Convad(
in_channels=a__ , out_channels=a__ , kernel_size=a__ , stride=a__ , padding=a__ , groups=a__ , bias=a__ , padding_mode="""zeros""" , )
if use_normalization:
UpperCamelCase = nn.BatchNormad(
num_features=a__ , eps=config.layer_norm_eps , momentum=0.9_9_9_7 , affine=a__ , track_running_stats=a__ , )
else:
UpperCamelCase = None
if use_activation:
if isinstance(a__ , a__ ):
UpperCamelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , a__ ):
UpperCamelCase = ACTaFN[config.hidden_act]
else:
UpperCamelCase = config.hidden_act
else:
UpperCamelCase = None
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ):
"""simple docstring"""
if self.config.tf_padding:
UpperCamelCase = apply_tf_padding(a__ , self.convolution )
UpperCamelCase = self.convolution(a__ )
if self.normalization is not None:
UpperCamelCase = self.normalization(a__ )
if self.activation is not None:
UpperCamelCase = self.activation(a__ )
return features
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = MobileNetVaConfig
__lowerCAmelCase = load_tf_weights_in_mobilenet_va
__lowerCAmelCase = "mobilenet_v1"
__lowerCAmelCase = "pixel_values"
__lowerCAmelCase = False
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
if isinstance(a__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(a__ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_SCREAMING_SNAKE_CASE = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_SCREAMING_SNAKE_CASE = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , SCREAMING_SNAKE_CASE_ , )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict = True ):
"""simple docstring"""
super().__init__(a__ )
UpperCamelCase = config
UpperCamelCase = 32
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
UpperCamelCase = MobileNetVaConvLayer(
a__ , in_channels=config.num_channels , out_channels=a__ , kernel_size=3 , stride=2 , )
UpperCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase = nn.ModuleList()
for i in range(13 ):
UpperCamelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
a__ , in_channels=a__ , out_channels=a__ , kernel_size=3 , stride=strides[i] , groups=a__ , ) )
self.layer.append(
MobileNetVaConvLayer(
a__ , in_channels=a__ , out_channels=a__ , kernel_size=1 , ) )
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = None , ):
"""simple docstring"""
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
UpperCamelCase = self.conv_stem(a__ )
UpperCamelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
UpperCamelCase = layer_module(a__ )
if output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = hidden_states
if self.pooler is not None:
UpperCamelCase = torch.flatten(self.pooler(a__ ) , start_dim=1 )
else:
UpperCamelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=a__ , )
@add_start_docstrings(
"""\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , SCREAMING_SNAKE_CASE_ , )
class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : int , lowerCamelCase_ : str ):
"""simple docstring"""
super().__init__(a__ )
UpperCamelCase = config.num_labels
UpperCamelCase = MobileNetVaModel(a__ )
UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=a__ )
UpperCamelCase = nn.Linear(a__ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : str = None , lowerCamelCase_ : int = None , ):
"""simple docstring"""
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.mobilenet_va(a__ , output_hidden_states=a__ , return_dict=a__ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(self.dropout(a__ ) )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = """single_label_classification"""
else:
UpperCamelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(a__ , a__ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(a__ , a__ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=a__ , logits=a__ , hidden_states=outputs.hidden_states , )
| 537 | """simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a : int = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = GPTSwaTokenizer
_UpperCamelCase : Tuple = False
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Any = GPTSwaTokenizer(a__ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = """This is a test"""
_lowerCAmelCase : Optional[int] = """This is a test"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[Any] = """<s>"""
_lowerCAmelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(a__ ) , 2000 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def __A ( self ):
_lowerCAmelCase : Any = GPTSwaTokenizer(a__ )
_lowerCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [465, 287, 265, 631, 842] )
_lowerCAmelCase : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
a__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(a__ )
# fmt: off
self.assertListEqual(
a__ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def __A ( self ):
_lowerCAmelCase : Optional[Any] = GPTSwaTokenizer(a__ )
_lowerCAmelCase : str = ["""This is a test""", """I was born in 92000, and this is falsé."""]
_lowerCAmelCase : List[Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(a__ , a__ ):
self.assertListEqual(tokenizer.encode_fast(a__ ) , a__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(a__ , a__ ):
self.assertEqual(tokenizer.decode_fast(a__ ) , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : str = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
_lowerCAmelCase : List[Any] = {"""input_ids""": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=a__ , )
| 213 | 0 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''encodec'''
def __init__( self : Any , _A : Any=[1.5, 3.0, 6.0, 12.0, 24.0] , _A : List[str]=2_4000 , _A : str=1 , _A : List[Any]=False , _A : Optional[int]=None , _A : str=None , _A : int=128 , _A : Tuple=32 , _A : Optional[Any]=1 , _A : int=[8, 5, 4, 2] , _A : str="weight_norm" , _A : Optional[int]=7 , _A : Dict=7 , _A : str=3 , _A : Optional[Any]=2 , _A : List[Any]=True , _A : Optional[Any]="reflect" , _A : Any=2 , _A : int=2 , _A : Dict=1.0 , _A : List[str]=1024 , _A : Optional[Any]=None , _A : Union[str, Any]=True , **_A : List[str] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = target_bandwidths
__SCREAMING_SNAKE_CASE : int = sampling_rate
__SCREAMING_SNAKE_CASE : List[Any] = audio_channels
__SCREAMING_SNAKE_CASE : Any = normalize
__SCREAMING_SNAKE_CASE : int = chunk_length_s
__SCREAMING_SNAKE_CASE : List[str] = overlap
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : int = num_filters
__SCREAMING_SNAKE_CASE : Tuple = num_residual_layers
__SCREAMING_SNAKE_CASE : Optional[int] = upsampling_ratios
__SCREAMING_SNAKE_CASE : Optional[int] = norm_type
__SCREAMING_SNAKE_CASE : Any = kernel_size
__SCREAMING_SNAKE_CASE : Optional[int] = last_kernel_size
__SCREAMING_SNAKE_CASE : Tuple = residual_kernel_size
__SCREAMING_SNAKE_CASE : Optional[Any] = dilation_growth_rate
__SCREAMING_SNAKE_CASE : List[str] = use_causal_conv
__SCREAMING_SNAKE_CASE : Any = pad_mode
__SCREAMING_SNAKE_CASE : Optional[int] = compress
__SCREAMING_SNAKE_CASE : int = num_lstm_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = trim_right_ratio
__SCREAMING_SNAKE_CASE : Dict = codebook_size
__SCREAMING_SNAKE_CASE : Any = codebook_dim if codebook_dim is not None else hidden_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**_A )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 700 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""DeiTFeatureExtractor"""]
lowercase_ = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 131 | 0 |
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = """"""
snake_case_ : Optional[int] = """"""
snake_case_ : Optional[int] = []
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> int:
'''simple docstring'''
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
snake_case_ : Dict = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
snake_case_ : List[Any] = self.__min_dist_top_down_dp(_lowercase , n - 1 )
snake_case_ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , _lowercase )
snake_case_ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
snake_case_ : List[str] = 1 + min(_lowercase , _lowercase , _lowercase )
return self.dp[m][n]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : Optional[Any] = worda
snake_case_ : int = worda
snake_case_ : List[str] = [[-1 for _ in range(len(_lowercase ) )] for _ in range(len(_lowercase ) )]
return self.__min_dist_top_down_dp(len(_lowercase ) - 1 , len(_lowercase ) - 1 )
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> int:
'''simple docstring'''
snake_case_ : str = worda
snake_case_ : List[str] = worda
snake_case_ : Any = len(_lowercase )
snake_case_ : str = len(_lowercase )
snake_case_ : str = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
snake_case_ : List[str] = j
elif j == 0: # second string is empty
snake_case_ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
snake_case_ : List[str] = self.dp[i - 1][j - 1]
else:
snake_case_ : Any = self.dp[i][j - 1]
snake_case_ : Tuple = self.dp[i - 1][j]
snake_case_ : int = self.dp[i - 1][j - 1]
snake_case_ : Tuple = 1 + min(_lowercase , _lowercase , _lowercase )
return self.dp[m][n]
if __name__ == "__main__":
__lowerCAmelCase : Tuple = EditDistance()
print('''****************** Testing Edit Distance DP Algorithm ******************''')
print()
__lowerCAmelCase : str = input('''Enter the first string: ''').strip()
__lowerCAmelCase : Optional[int] = input('''Enter the second string: ''').strip()
print()
print(F'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(F'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('''*************** End of Testing Edit Distance DP Algorithm ***************''')
| 58 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__lowerCAmelCase : int = TypeVar('''KT''')
__lowerCAmelCase : Union[str, Any] = TypeVar('''VT''')
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = "root" , _lowercase = None ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = key
snake_case_ : Tuple = value
snake_case_ : list[Node[KT, VT]] = []
def __repr__( self ) -> str:
'''simple docstring'''
return f'Node({self.key}: {self.value})'
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.forward )
class _lowerCAmelCase ( Generic[KT, VT] ):
"""simple docstring"""
def __init__( self , _lowercase = 0.5 , _lowercase = 1_6 ) -> int:
'''simple docstring'''
snake_case_ : Node[KT, VT] = Node[KT, VT]()
snake_case_ : Union[str, Any] = 0
snake_case_ : Optional[int] = p
snake_case_ : Any = max_level
def __str__( self ) -> str:
'''simple docstring'''
snake_case_ : str = list(self )
if len(_lowercase ) == 0:
return f'SkipList(level={self.level})'
snake_case_ : List[Any] = max((len(str(_lowercase ) ) for item in items) , default=4 )
snake_case_ : str = max(_lowercase , 4 ) + 4
snake_case_ : Union[str, Any] = self.head
snake_case_ : Dict = []
snake_case_ : List[str] = node.forward.copy()
lines.append(f'[{node.key}]'.ljust(_lowercase , """-""" ) + """* """ * len(_lowercase ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
while len(node.forward ) != 0:
snake_case_ : Optional[Any] = node.forward[0]
lines.append(
f'[{node.key}]'.ljust(_lowercase , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(_lowercase ) )
snake_case_ : List[str] = node.forward
lines.append("""None""".ljust(_lowercase ) + """* """ * len(_lowercase ) )
return f'SkipList(level={self.level})\n' + "\n".join(_lowercase )
def __iter__( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
snake_case_ : Dict = node.forward[0]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCAmelCase__ ( self , _lowercase ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
snake_case_ : List[Any] = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(_lowercase )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCAmelCase__ ( self , _lowercase ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
for i, update_node in enumerate(_lowercase ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
snake_case_ : List[str] = node.forward[i]
else:
snake_case_ : Tuple = update_node.forward[:i]
def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , _lowercase ):
update_vector.append(self.head )
snake_case_ : Any = level
snake_case_ : Optional[int] = Node(_lowercase , _lowercase )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(_lowercase )
else:
snake_case_ : Optional[Any] = new_node
def UpperCAmelCase__ ( self , _lowercase ) -> VT | None:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self._locate_node(_lowercase )
if node is not None:
return node.value
return None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : List[str] = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 1_2 )
skip_list.insert("""Key3""" , 4_1 )
skip_list.insert("""Key4""" , -1_9 )
snake_case_ : Optional[int] = skip_list.head
snake_case_ : List[Any] = {}
while node.level != 0:
snake_case_ : List[str] = node.forward[0]
snake_case_ : Union[str, Any] = node.value
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_0 )
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 1_0 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 1_0 )
snake_case_ : str = skip_list.head
snake_case_ : str = {}
while node.level != 0:
snake_case_ : Optional[Any] = node.forward[0]
snake_case_ : int = node.value
if len(__UpperCamelCase ) != 4:
print()
assert len(__UpperCamelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : str = SkipList()
assert skip_list.find("""Some key""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[Any] = SkipList()
skip_list.insert("""Key2""" , 2_0 )
assert skip_list.find("""Key2""" ) == 2_0
skip_list.insert("""Some Key""" , 1_0 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 1_3 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 1_0
assert skip_list.find("""V""" ) == 1_3
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Tuple = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Optional[int] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 1_4
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 1_2
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 1_5
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Union[str, Any] = SkipList()
skip_list.insert("""Key1""" , 1_2 )
skip_list.insert("""V""" , 1_3 )
skip_list.insert("""X""" , 1_4_2 )
skip_list.insert("""Key2""" , 1_5 )
skip_list.delete("""X""" )
def traverse_keys(__UpperCamelCase : str ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__UpperCamelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __lowerCAmelCase ( ):
'''simple docstring'''
def is_sorted(__UpperCamelCase : List[Any] ):
return all(next_item >= item for item, next_item in zip(__UpperCamelCase , lst[1:] ) )
snake_case_ : str = SkipList()
for i in range(1_0 ):
skip_list.insert(__UpperCamelCase , __UpperCamelCase )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__UpperCamelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__UpperCamelCase ) )
def __lowerCAmelCase ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Dict = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 58 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
# TODO Update this
a_ : List[str] = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCamelCase__ ( UpperCAmelCase_):
"""simple docstring"""
_A = 'esm'
def __init__(self , __a=None , __a=None , __a=None , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a=0.1 , __a=0.1 , __a=10_26 , __a=0.02 , __a=1E-12 , __a="absolute" , __a=True , __a=None , __a=False , __a=False , __a=None , __a=None , **__a , ):
'''simple docstring'''
super().__init__(pad_token_id=__a , mask_token_id=__a , **__a )
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = initializer_range
lowerCamelCase = layer_norm_eps
lowerCamelCase = position_embedding_type
lowerCamelCase = use_cache
lowerCamelCase = emb_layer_norm_before
lowerCamelCase = token_dropout
lowerCamelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowerCamelCase = EsmFoldConfig()
elif isinstance(__a , __a ):
lowerCamelCase = EsmFoldConfig(**__a )
lowerCamelCase = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowerCamelCase = get_default_vocab_list()
else:
lowerCamelCase = vocab_list
else:
lowerCamelCase = None
lowerCamelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , __a ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def _a (self ):
'''simple docstring'''
lowerCamelCase = super().to_dict()
if isinstance(self.esmfold_config , __a ):
lowerCamelCase = self.esmfold_config.to_dict()
return output
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = None
_A = True
_A = False
_A = False
_A = False
_A = 0
_A = True
_A = False
_A = 1_28
_A = None
def _a (self ):
'''simple docstring'''
if self.trunk is None:
lowerCamelCase = TrunkConfig()
elif isinstance(self.trunk , __a ):
lowerCamelCase = TrunkConfig(**self.trunk )
def _a (self ):
'''simple docstring'''
lowerCamelCase = asdict(self )
lowerCamelCase = self.trunk.to_dict()
return output
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = 48
_A = 10_24
_A = 1_28
_A = 32
_A = 32
_A = 32
_A = 0
_A = 0
_A = False
_A = 4
_A = 1_28
_A = None
def _a (self ):
'''simple docstring'''
if self.structure_module is None:
lowerCamelCase = StructureModuleConfig()
elif isinstance(self.structure_module , __a ):
lowerCamelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
lowerCamelCase = self.sequence_state_dim // self.sequence_head_width
lowerCamelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def _a (self ):
'''simple docstring'''
lowerCamelCase = asdict(self )
lowerCamelCase = self.structure_module.to_dict()
return output
@dataclass
class lowerCamelCase__ :
"""simple docstring"""
_A = 3_84
_A = 1_28
_A = 16
_A = 1_28
_A = 12
_A = 4
_A = 8
_A = 0.1
_A = 8
_A = 1
_A = 2
_A = 7
_A = 10
_A = 1e-8
_A = 1e5
def _a (self ):
'''simple docstring'''
return asdict(self )
def __lowercase( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
) | 484 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def __lowercase( UpperCAmelCase__ ):
"""simple docstring"""
return x + 2
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
lowerCamelCase = "x = 3"
lowerCamelCase = {}
lowerCamelCase = evaluate(__a , {} , state=__a )
assert result == 3
self.assertDictEqual(__a , {"x": 3} )
lowerCamelCase = "x = y"
lowerCamelCase = {"y": 5}
lowerCamelCase = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__a , {"x": 5, "y": 5} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "y = add_two(x)"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {"add_two": add_two} , state=__a )
assert result == 5
self.assertDictEqual(__a , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
lowerCamelCase = evaluate(__a , {} , state=__a )
assert result is None
assert "tried to execute add_two" in out.out
def _a (self ):
'''simple docstring'''
lowerCamelCase = "x = 3"
lowerCamelCase = {}
lowerCamelCase = evaluate(__a , {} , state=__a )
assert result == 3
self.assertDictEqual(__a , {"x": 3} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "test_dict = {'x': x, 'y': add_two(x)}"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {"add_two": add_two} , state=__a )
self.assertDictEqual(__a , {"x": 3, "y": 5} )
self.assertDictEqual(__a , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "x = 3\ny = 5"
lowerCamelCase = {}
lowerCamelCase = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__a , {"x": 3, "y": 5} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "text = f'This is x: {x}.'"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__a , {"x": 3, "text": "This is x: 3."} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "if x <= 3:\n y = 2\nelse:\n y = 5"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__a , {"x": 3, "y": 2} )
lowerCamelCase = {"x": 8}
lowerCamelCase = evaluate(__a , {} , state=__a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__a , {"x": 8, "y": 5} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "test_list = [x, add_two(x)]"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {"add_two": add_two} , state=__a )
self.assertListEqual(__a , [3, 5] )
self.assertDictEqual(__a , {"x": 3, "test_list": [3, 5]} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "y = x"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {} , state=__a )
assert result == 3
self.assertDictEqual(__a , {"x": 3, "y": 3} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "test_list = [x, add_two(x)]\ntest_list[1]"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {"add_two": add_two} , state=__a )
assert result == 5
self.assertDictEqual(__a , {"x": 3, "test_list": [3, 5]} )
lowerCamelCase = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
lowerCamelCase = {"x": 3}
lowerCamelCase = evaluate(__a , {"add_two": add_two} , state=__a )
assert result == 5
self.assertDictEqual(__a , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _a (self ):
'''simple docstring'''
lowerCamelCase = "x = 0\nfor i in range(3):\n x = i"
lowerCamelCase = {}
lowerCamelCase = evaluate(__a , {"range": range} , state=__a )
assert result == 2
self.assertDictEqual(__a , {"x": 2, "i": 2} ) | 484 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.